repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes) |
---|---|---|---|---|---|---|---|---|---|---|
yolain/ComfyUI-Easy-Use | py/easyNodes.py | [
{
"identifier": "advanced_encode",
"path": "py/adv_encode.py",
"snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):\n embs_l = None\n embs_g = None\n pooled = None\n if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):\n embs_l, _ = advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n if 'g' in tokenized:\n embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x,\n encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n return prepareXL(embs_l, embs_g, pooled, clip_balance)\n else:\n return advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: (clip.encode_from_tokens({'l': x}), None),\n w_max=w_max)"
},
{
"identifier": "advanced_encode_XL",
"path": "py/adv_encode.py",
"snippet": "def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized1 = clip.tokenize(text1, return_word_ids=True)\n tokenized2 = clip.tokenize(text2, return_word_ids=True)\n\n embs_l, _ = advanced_encode_from_tokens(tokenized1['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n\n embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n\n gcd_num = gcd(embs_l.shape[1], embs_g.shape[1])\n repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1])\n repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1])\n\n return prepareXL(embs_l.expand((-1, repeat_l, -1)), embs_g.expand((-1, repeat_g, -1)), pooled, clip_balance)"
},
{
"identifier": "BASE_RESOLUTIONS",
"path": "py/config.py",
"snippet": "BASE_RESOLUTIONS = [\n (\"自定义\", \"自定义\"),\n (512, 512),\n (512, 768),\n (768, 512),\n (576, 1024),\n (768, 1024),\n (768, 1280),\n (768, 1344),\n (768, 1536),\n (816, 1920),\n (832, 1152),\n (896, 1152),\n (896, 1088),\n (1024, 1024),\n (1024, 576),\n (1024, 768),\n (1080, 1920),\n (1440, 2560),\n (1088, 896),\n (1152, 832),\n (1152, 896),\n (1280, 768),\n (1344, 768),\n (1536, 640),\n (1536, 768),\n (1920, 816),\n (1920, 1080),\n (2560, 1440),\n]"
},
{
"identifier": "log_node_info",
"path": "py/log.py",
"snippet": "def log_node_info(node_name, message=None):\n \"\"\"Logs an info message.\"\"\"\n _log_node(COLORS_FG[\"CYAN\"], node_name, message)"
},
{
"identifier": "log_node_error",
"path": "py/log.py",
"snippet": "def log_node_error(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"RED\"], node_name, message)"
},
{
"identifier": "log_node_warn",
"path": "py/log.py",
"snippet": "def log_node_warn(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"YELLOW\"], node_name, message)"
},
{
"identifier": "log_node_success",
"path": "py/log.py",
"snippet": "def log_node_success(node_name, message=None):\n \"\"\"Logs a success message.\"\"\"\n _log_node(COLORS_FG[\"GREEN\"], node_name, message)"
},
{
"identifier": "process_with_loras",
"path": "py/wildcards.py",
"snippet": "def process_with_loras(wildcard_opt, model, clip, title=\"Positive\", seed=None, can_load_lora=True, pipe_lora_stack=[]):\n lora_name_cache = []\n\n pass1 = process(wildcard_opt, seed)\n loras = extract_lora_values(pass1)\n pass2 = remove_lora_tags(pass1)\n\n has_noodle_key = True if \"__\" in wildcard_opt else False\n has_loras = True if loras != [] else False\n show_wildcard_prompt = True if has_noodle_key or has_loras else False\n\n for lora_name, model_weight, clip_weight, lbw, lbw_a, lbw_b in loras:\n if (lora_name.split('.')[-1]) not in folder_paths.supported_pt_extensions:\n lora_name = lora_name+\".safetensors\"\n\n lora_name = resolve_lora_name(lora_name_cache, lora_name)\n\n path = folder_paths.get_full_path(\"loras\", lora_name)\n\n if path is not None:\n print(f\"LORA: {lora_name}: {model_weight}, {clip_weight}, LBW={lbw}, A={lbw_a}, B={lbw_b}\")\n\n def default_lora():\n return nodes.LoraLoader().load_lora(model, clip, lora_name, model_weight, clip_weight)\n\n if lbw is not None:\n cls = nodes.NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']\n if can_load_lora:\n model, clip, _ = cls().doit(model, clip, lora_name, model_weight, clip_weight, False, 0, lbw_a, lbw_b, \"\", lbw)\n pipe_lora_stack.append({\n \"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight,\n \"lora_clip_strength\": clip_weight,\n \"lbw_a\": lbw_a,\n \"lbw_b\": lbw_b,\n \"lbw\": lbw\n })\n else:\n pipe_lora_stack.append({\"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight, \"lora_clip_strength\": clip_weight})\n if can_load_lora:\n model, clip = default_lora()\n else:\n print(f\"LORA NOT FOUND: {lora_name}\")\n\n # print(f\"{title}: {pass2}\")\n # print(f'{title}_decode:', pass1)\n\n return model, clip, pass2, pass1, show_wildcard_prompt, pipe_lora_stack"
},
{
"identifier": "get_wildcard_list",
"path": "py/wildcards.py",
"snippet": "def get_wildcard_list():\n return [f\"__{x}__\" for x in easy_wildcard_dict.keys()]"
},
{
"identifier": "sample_dpmpp_2s_ancestral",
"path": "py/gradual_latent_hires_fix.py",
"snippet": "@torch.no_grad()\ndef sample_dpmpp_2s_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with DPM-Solver++(2S) second-order steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigma_down == 0:\n # Euler method\n d = to_d(x, sigmas[i], denoised)\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n else:\n # DPM-Solver++(2S)\n t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)\n r = 1 / 2\n h = t_next - t\n s = t + r * h\n x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised\n denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)\n x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2\n # Noise addition\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x"
},
{
"identifier": "sample_dpmpp_2m_sde",
"path": "py/gradual_latent_hires_fix.py",
"snippet": "@torch.no_grad()\ndef sample_dpmpp_2m_sde(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n solver_type=\"midpoint\",\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"DPM-Solver++(2M) SDE.\"\"\"\n\n if solver_type not in {\"heun\", \"midpoint\"}:\n raise ValueError(\"solver_type must be 'heun' or 'midpoint'\")\n\n seed = extra_args.get(\"seed\", None)\n sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n old_denoised = None\n h_last = None\n h = None\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigmas[i + 1] == 0:\n # Denoising step\n x = denoised\n else:\n # DPM-Solver++(2M) SDE\n t, s = -sigmas[i].log(), -sigmas[i + 1].log()\n h = s - t\n eta_h = eta * h\n\n x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised\n\n if old_denoised is not None:\n r = h_last / h\n if solver_type == \"heun\":\n x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)\n elif solver_type == \"midpoint\":\n x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)\n\n if eta:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n denoised = None # 次ステップとサイズがあわないのでとりあえずNoneにしておく。\n noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True)\n x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise\n\n old_denoised = denoised\n h_last = h\n return x"
},
{
"identifier": "sample_lcm",
"path": "py/gradual_latent_hires_fix.py",
"snippet": "@torch.no_grad()\ndef sample_lcm(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n noise_sampler=None,\n eta=None,\n s_noise=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n\n x = denoised\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])\n\n return x"
},
{
"identifier": "sample_euler_ancestral",
"path": "py/gradual_latent_hires_fix.py",
"snippet": "@torch.no_grad()\ndef sample_euler_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with Euler method steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n d = to_d(x, sigmas[i], denoised)\n # Euler method\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n\n noise_sampler = default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x"
},
{
"identifier": "DynThresh",
"path": "py/dynthres_core.py",
"snippet": "class DynThresh:\n\n Modes = [\"Constant\", \"Linear Down\", \"Cosine Down\", \"Half Cosine Down\", \"Linear Up\", \"Cosine Up\", \"Half Cosine Up\", \"Power Up\", \"Power Down\", \"Linear Repeating\", \"Cosine Repeating\", \"Sawtooth\"]\n Startpoints = [\"MEAN\", \"ZERO\"]\n Variabilities = [\"AD\", \"STD\"]\n\n def __init__(self, mimic_scale, threshold_percentile, mimic_mode, mimic_scale_min, cfg_mode, cfg_scale_min, sched_val, experiment_mode, max_steps, separate_feature_channels, scaling_startpoint, variability_measure, interpolate_phi):\n self.mimic_scale = mimic_scale\n self.threshold_percentile = threshold_percentile\n self.mimic_mode = mimic_mode\n self.cfg_mode = cfg_mode\n self.max_steps = max_steps\n self.cfg_scale_min = cfg_scale_min\n self.mimic_scale_min = mimic_scale_min\n self.experiment_mode = experiment_mode\n self.sched_val = sched_val\n self.sep_feat_channels = separate_feature_channels\n self.scaling_startpoint = scaling_startpoint\n self.variability_measure = variability_measure\n self.interpolate_phi = interpolate_phi\n\n def interpret_scale(self, scale, mode, min):\n scale -= min\n max = self.max_steps - 1\n frac = self.step / max\n if mode == \"Constant\":\n pass\n elif mode == \"Linear Down\":\n scale *= 1.0 - frac\n elif mode == \"Half Cosine Down\":\n scale *= math.cos(frac)\n elif mode == \"Cosine Down\":\n scale *= math.cos(frac * 1.5707)\n elif mode == \"Linear Up\":\n scale *= frac\n elif mode == \"Half Cosine Up\":\n scale *= 1.0 - math.cos(frac)\n elif mode == \"Cosine Up\":\n scale *= 1.0 - math.cos(frac * 1.5707)\n elif mode == \"Power Up\":\n scale *= math.pow(frac, self.sched_val)\n elif mode == \"Power Down\":\n scale *= 1.0 - math.pow(frac, self.sched_val)\n elif mode == \"Linear Repeating\":\n portion = (frac * self.sched_val) % 1.0\n scale *= (0.5 - portion) * 2 if portion < 0.5 else (portion - 0.5) * 2\n elif mode == \"Cosine Repeating\":\n scale *= math.cos(frac * 6.28318 * self.sched_val) * 0.5 + 0.5\n elif mode == \"Sawtooth\":\n scale *= (frac * self.sched_val) % 1.0\n scale += min\n return scale\n\n def dynthresh(self, cond, uncond, cfg_scale, weights):\n mimic_scale = self.interpret_scale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)\n cfg_scale = self.interpret_scale(cfg_scale, self.cfg_mode, self.cfg_scale_min)\n # uncond shape is (batch, 4, height, width)\n conds_per_batch = cond.shape[0] / uncond.shape[0]\n assert conds_per_batch == int(conds_per_batch), \"Expected # of conds per batch to be constant across batches\"\n cond_stacked = cond.reshape((-1, int(conds_per_batch)) + uncond.shape[1:])\n\n ### Normal first part of the CFG Scale logic, basically\n diff = cond_stacked - uncond.unsqueeze(1)\n if weights is not None:\n diff = diff * weights\n relative = diff.sum(1)\n\n ### Get the normal result for both mimic and normal scale\n mim_target = uncond + relative * mimic_scale\n cfg_target = uncond + relative * cfg_scale\n ### If we weren't doing mimic scale, we'd just return cfg_target here\n\n ### Now recenter the values relative to their average rather than absolute, to allow scaling from average\n mim_flattened = mim_target.flatten(2)\n cfg_flattened = cfg_target.flatten(2)\n mim_means = mim_flattened.mean(dim=2).unsqueeze(2)\n cfg_means = cfg_flattened.mean(dim=2).unsqueeze(2)\n mim_centered = mim_flattened - mim_means\n cfg_centered = cfg_flattened - cfg_means\n\n if self.sep_feat_channels:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std(dim=2).unsqueeze(2)\n cfg_scaleref = 
cfg_centered.std(dim=2).unsqueeze(2)\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max(dim=2).values.unsqueeze(2)\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile, dim=2).unsqueeze(2)\n\n else:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std()\n cfg_scaleref = cfg_centered.std()\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max()\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile)\n\n if self.scaling_startpoint == 'ZERO':\n scaling_factor = mim_scaleref / cfg_scaleref\n result = cfg_flattened * scaling_factor\n\n else: # 'MEAN'\n if self.variability_measure == 'STD':\n cfg_renormalized = (cfg_centered / cfg_scaleref) * mim_scaleref\n else: # 'AD'\n ### Get the maximum value of all datapoints (with an optional threshold percentile on the uncond)\n max_scaleref = torch.maximum(mim_scaleref, cfg_scaleref)\n ### Clamp to the max\n cfg_clamped = cfg_centered.clamp(-max_scaleref, max_scaleref)\n ### Now shrink from the max to normalize and grow to the mimic scale (instead of the CFG scale)\n cfg_renormalized = (cfg_clamped / max_scaleref) * mim_scaleref\n\n ### Now add it back onto the averages to get into real scale again and return\n result = cfg_renormalized + cfg_means\n\n actual_res = result.unflatten(2, mim_target.shape[2:])\n\n if self.interpolate_phi != 1.0:\n actual_res = actual_res * self.interpolate_phi + cfg_target * (1.0 - self.interpolate_phi)\n\n if self.experiment_mode == 1:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n if num[0][0][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][1][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][2][y][x] > 1.5:\n num[0][2][y][x] *= 0.5\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 2:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n over_scale = False\n for z in range(0, 4):\n if abs(num[0][z][y][x]) > 1.5:\n over_scale = True\n if over_scale:\n for z in range(0, 4):\n num[0][z][y][x] *= 0.7\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 3:\n coefs = torch.tensor([\n # R G B W\n [0.298, 0.207, 0.208, 0.0], # L1\n [0.187, 0.286, 0.173, 0.0], # L2\n [-0.158, 0.189, 0.264, 0.0], # L3\n [-0.184, -0.271, -0.473, 1.0], # L4\n ], device=uncond.device)\n res_rgb = torch.einsum(\"laxy,ab -> lbxy\", actual_res, coefs)\n max_r, max_g, max_b, max_w = res_rgb[0][0].max(), res_rgb[0][1].max(), res_rgb[0][2].max(), res_rgb[0][3].max()\n max_rgb = max(max_r, max_g, max_b)\n print(f\"test max = r={max_r}, g={max_g}, b={max_b}, w={max_w}, rgb={max_rgb}\")\n if self.step / (self.max_steps - 1) > 0.2:\n if max_rgb < 2.0 and max_w < 3.0:\n res_rgb /= max_rgb / 2.4\n else:\n if max_rgb > 2.4 and max_w > 3.0:\n res_rgb /= max_rgb / 2.4\n actual_res = torch.einsum(\"laxy,ab -> lbxy\", res_rgb, coefs.inverse())\n\n return actual_res"
}
] | import sys
import os
import re
import json
import time
import math
import torch
import psutil
import random
import datetime
import comfy.sd
import comfy.utils
import numpy as np
import folder_paths
import comfy.samplers
import comfy.controlnet
import latent_preview
import comfy.model_base
import comfy.model_management
from pathlib import Path
from comfy.sd import CLIP, VAE
from comfy.cli_args import args
from urllib.request import urlopen
from collections import defaultdict
from PIL.PngImagePlugin import PngInfo
from PIL import Image, ImageDraw, ImageFont
from comfy.model_patcher import ModelPatcher
from comfy_extras.chainner_models import model_loading
from typing import Dict, List, Optional, Tuple, Union, Any
from .adv_encode import advanced_encode, advanced_encode_XL
from server import PromptServer
from nodes import VAELoader, MAX_RESOLUTION, RepeatLatentBatch, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS, ConditioningSetMask
from comfy_extras.nodes_mask import LatentCompositeMasked
from .config import BASE_RESOLUTIONS
from .log import log_node_info, log_node_error, log_node_warn, log_node_success
from .wildcards import process_with_loras, get_wildcard_list
from comfy_extras.nodes_stable3d import camera_embeddings
from .gradual_latent_hires_fix import sample_dpmpp_2s_ancestral, sample_dpmpp_2m_sde, sample_lcm, sample_euler_ancestral
from .dynthres_core import DynThresh | 10,669 |
#--------------------------------------------------------------- Loaders: start ----------------------------------------------------------------------#
# Easy loader (full)
class fullLoader:
@classmethod
def INPUT_TYPES(cls):
resolution_strings = [f"{width} x {height}" for width, height in BASE_RESOLUTIONS]
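        # Note (not in the original source): BASE_RESOLUTIONS begins with the
        # ("自定义", "自定义") ("custom", "custom") entry, so the first dropdown choice
        # reads "自定义 x 自定义" and is treated below as "keep the manual
        # empty_latent_width/height values".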
a1111_prompt_style_default = False
return {"required": {
"ckpt_name": (folder_paths.get_filename_list("checkpoints"),),
"config_name": (["Default", ] + folder_paths.get_filename_list("configs"), {"default": "Default"}),
"vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),),
"clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}),
"lora_name": (["None"] + folder_paths.get_filename_list("loras"),),
"lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
"lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
"resolution": (resolution_strings,),
"empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"positive": ("STRING", {"default": "Positive", "multiline": True}),
"positive_token_normalization": (["none", "mean", "length", "length+mean"],),
"positive_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),
"negative": ("STRING", {"default": "Negative", "multiline": True}),
"negative_token_normalization": (["none", "mean", "length", "length+mean"],),
"negative_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
},
"optional": {"model_override": ("MODEL",), "clip_override": ("CLIP",), "vae_override": ("VAE",), "optional_lora_stack": ("LORA_STACK",), "a1111_prompt_style": ("BOOLEAN", {"default": a1111_prompt_style_default}),},
"hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}
}
RETURN_TYPES = ("PIPE_LINE", "MODEL", "VAE", "CLIP")
RETURN_NAMES = ("pipe", "model", "vae", "clip")
FUNCTION = "adv_pipeloader"
CATEGORY = "EasyUse/Loaders"
def adv_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip,
lora_name, lora_model_strength, lora_clip_strength,
resolution, empty_latent_width, empty_latent_height,
positive, positive_token_normalization, positive_weight_interpretation,
negative, negative_token_normalization, negative_weight_interpretation,
batch_size, model_override=None, clip_override=None, vae_override=None, optional_lora_stack=None, a1111_prompt_style=False, prompt=None,
my_unique_id=None
):
model: ModelPatcher | None = None
clip: CLIP | None = None
vae: VAE | None = None
can_load_lora = True
pipe_lora_stack = []
# resolution
if resolution != "自定义 x 自定义":
try:
width, height = map(int, resolution.split(' x '))
empty_latent_width = width
empty_latent_height = height
except ValueError:
raise ValueError("Invalid base_resolution format.")
# Create Empty Latent
latent = torch.zeros([batch_size, 4, empty_latent_height // 8, empty_latent_width // 8]).cpu()
samples = {"samples": latent}
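        # Stable Diffusion latents are 1/8 of the pixel resolution with 4 channels,
        # so a 512x512 request produces a [batch_size, 4, 64, 64] tensor here.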
# Clean models from loaded_objects
easyCache.update_loaded_objects(prompt)
log_node_warn("正在处理模型...")
# 判断是否存在 模型叠加xyplot, 若存在优先缓存第一个模型
xyinputs_id = next((x for x in prompt if str(prompt[x]["class_type"]) == "easy XYInputs: ModelMergeBlocks"), None)
if xyinputs_id is not None:
node = prompt[xyinputs_id]
if "ckpt_name_1" in node["inputs"]:
ckpt_name_1 = node["inputs"]["ckpt_name_1"]
model, clip, vae = easyCache.load_checkpoint(ckpt_name_1)
can_load_lora = False
# Load models
elif model_override is not None and clip_override is not None and vae_override is not None:
model = model_override
clip = clip_override
vae = vae_override
elif model_override is not None:
raise Exception(f"[ERROR] clip or vae is missing")
elif vae_override is not None:
raise Exception(f"[ERROR] model or clip is missing")
elif clip_override is not None:
raise Exception(f"[ERROR] model or vae is missing")
else:
model, clip, vae = easyCache.load_checkpoint(ckpt_name, config_name)
if optional_lora_stack is not None:
for lora in optional_lora_stack:
if can_load_lora:
model, clip = easyCache.load_lora(lora[0], model, clip, lora[1], lora[2])
pipe_lora_stack.append({"lora_name": lora[0], "model": model, "clip": clip, "lora_model_strength": lora[1], "lora_clip_strength": lora[2]})
if lora_name != "None":
if can_load_lora:
model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength)
pipe_lora_stack.append({"lora_name": lora_name, "model": model, "clip": clip, "lora_model_strength": lora_model_strength,
"lora_clip_strength": lora_clip_strength})
# Check for custom VAE
if vae_name not in ["Baked VAE", "Baked-VAE"]:
vae = easyCache.load_vae(vae_name)
# CLIP skip
if not clip:
raise Exception("No CLIP found")
log_node_warn("正在处理提示词...")
positive_seed = find_wildcards_seed(positive, prompt)
|
# Loader
class easyLoader:
def __init__(self):
self.loaded_objects = {
"ckpt": defaultdict(tuple), # {ckpt_name: (model, ...)}
"clip": defaultdict(tuple),
"clip_vision": defaultdict(tuple),
"bvae": defaultdict(tuple),
"vae": defaultdict(object),
"lora": defaultdict(dict), # {lora_name: {UID: (model_lora, clip_lora)}}
}
self.memory_threshold = self.determine_memory_threshold(0.7)
def clean_values(self, values: str):
original_values = values.split("; ")
cleaned_values = []
for value in original_values:
cleaned_value = value.strip(';').strip()
if cleaned_value == "":
continue
try:
cleaned_value = int(cleaned_value)
except ValueError:
try:
cleaned_value = float(cleaned_value)
except ValueError:
pass
cleaned_values.append(cleaned_value)
return cleaned_values
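    # Informal doctest-style sketch of clean_values (not from the repository's tests):
    #   >>> easyCache.clean_values("20; 7.5; euler")
    #   [20, 7.5, 'euler']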
def clear_unused_objects(self, desired_names: set, object_type: str):
keys = set(self.loaded_objects[object_type].keys())
for key in keys - desired_names:
del self.loaded_objects[object_type][key]
def get_input_value(self, entry, key):
val = entry["inputs"][key]
return val if isinstance(val, str) else val[0]
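    # In a ComfyUI API prompt an input is either a literal value or a
    # [source_node_id, output_index] link; get_input_value returns the literal as-is
    # and the source node id for links. Hypothetical sketch:
    #   entry = {"inputs": {"ckpt_name": "sd15.ckpt", "vae_name": ["12", 0]}}
    #   get_input_value(entry, "ckpt_name")  -> "sd15.ckpt"
    #   get_input_value(entry, "vae_name")   -> "12"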
def process_pipe_loader(self, entry,
desired_ckpt_names, desired_vae_names,
desired_lora_names, desired_lora_settings, num_loras=3, suffix=""):
for idx in range(1, num_loras + 1):
lora_name_key = f"{suffix}lora{idx}_name"
desired_lora_names.add(self.get_input_value(entry, lora_name_key))
setting = f'{self.get_input_value(entry, lora_name_key)};{entry["inputs"][f"{suffix}lora{idx}_model_strength"]};{entry["inputs"][f"{suffix}lora{idx}_clip_strength"]}'
desired_lora_settings.add(setting)
desired_ckpt_names.add(self.get_input_value(entry, f"{suffix}ckpt_name"))
desired_vae_names.add(self.get_input_value(entry, f"{suffix}vae_name"))
def update_loaded_objects(self, prompt):
desired_ckpt_names = set()
desired_vae_names = set()
desired_lora_names = set()
desired_lora_settings = set()
for entry in prompt.values():
class_type = entry["class_type"]
if class_type == "easy a1111Loader" or class_type == "easy comfyLoader":
lora_name = self.get_input_value(entry, "lora_name")
desired_lora_names.add(lora_name)
setting = f'{lora_name};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}'
desired_lora_settings.add(setting)
desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name"))
desired_vae_names.add(self.get_input_value(entry, "vae_name"))
elif class_type == "easy zero123Loader" or class_type == 'easy svdLoader':
desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name"))
desired_vae_names.add(self.get_input_value(entry, "vae_name"))
elif class_type == "easy XYInputs: ModelMergeBlocks":
desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_1"))
desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_2"))
vae_use = self.get_input_value(entry, "vae_use")
if vae_use != 'Use Model 1' and vae_use != 'Use Model 2':
desired_vae_names.add(vae_use)
object_types = ["ckpt", "clip", "bvae", "vae", "lora"]
for object_type in object_types:
desired_names = desired_ckpt_names if object_type in ["ckpt", "clip", "bvae"] else desired_vae_names if object_type == "vae" else desired_lora_names
self.clear_unused_objects(desired_names, object_type)
def add_to_cache(self, obj_type, key, value):
"""
Add an item to the cache with the current timestamp.
"""
timestamped_value = (value, time.time())
self.loaded_objects[obj_type][key] = timestamped_value
def determine_memory_threshold(self, percentage=0.8):
"""
Determines the memory threshold as a percentage of the total available memory.
Args:
- percentage (float): The fraction of total memory to use as the threshold.
Should be a value between 0 and 1. Default is 0.8 (80%).
Returns:
- memory_threshold (int): Memory threshold in bytes.
"""
total_memory = psutil.virtual_memory().total
memory_threshold = total_memory * percentage
return memory_threshold
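    # Worked example (assuming a machine with 16 GiB of RAM): __init__ passes 0.7,
    # so the eviction threshold is 16 GiB * 0.7 = 11.2 GiB of process memory.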
def get_memory_usage(self):
"""
Returns the memory usage of the current process in bytes.
"""
process = psutil.Process(os.getpid())
return process.memory_info().rss
def eviction_based_on_memory(self):
"""
Evicts objects from cache based on memory usage and priority.
"""
current_memory = self.get_memory_usage()
if current_memory < self.memory_threshold:
return
eviction_order = ["vae", "lora", "bvae", "clip", "ckpt"]
for obj_type in eviction_order:
if current_memory < self.memory_threshold:
break
# Sort items based on age (using the timestamp)
items = list(self.loaded_objects[obj_type].items())
items.sort(key=lambda x: x[1][1]) # Sorting by timestamp
for item in items:
if current_memory < self.memory_threshold:
break
del self.loaded_objects[obj_type][item[0]]
current_memory = self.get_memory_usage()
def load_checkpoint(self, ckpt_name, config_name=None, load_vision=False):
cache_name = ckpt_name
if config_name not in [None, "Default"]:
cache_name = ckpt_name + "_" + config_name
if cache_name in self.loaded_objects["ckpt"]:
cache_out = self.loaded_objects["clip_vision"][cache_name][0] if load_vision else self.loaded_objects["clip"][cache_name][0]
return self.loaded_objects["ckpt"][cache_name][0], cache_out, self.loaded_objects["bvae"][cache_name][0]
ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
output_clip = False if load_vision else True
output_clipvision = True if load_vision else False
if config_name not in [None, "Default"]:
config_path = folder_paths.get_full_path("configs", config_name)
loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision,
embedding_directory=folder_paths.get_folder_paths("embeddings"))
else:
loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings"))
self.add_to_cache("ckpt", cache_name, loaded_ckpt[0])
self.add_to_cache("bvae", cache_name, loaded_ckpt[2])
if load_vision:
out = loaded_ckpt[3]
self.add_to_cache("clip_vision", cache_name, out)
else:
out = loaded_ckpt[1]
self.add_to_cache("clip", cache_name, loaded_ckpt[1])
self.eviction_based_on_memory()
return loaded_ckpt[0], out, loaded_ckpt[2]
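    # Hypothetical usage sketch (the checkpoint file name is made up):
    #   model, clip, vae = easyCache.load_checkpoint("v1-5-pruned-emaonly.safetensors")
    #   # a second call with the same name returns the cached objects instead of reloading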
def load_vae(self, vae_name):
if vae_name in self.loaded_objects["vae"]:
return self.loaded_objects["vae"][vae_name][0]
vae_path = folder_paths.get_full_path("vae", vae_name)
sd = comfy.utils.load_torch_file(vae_path)
loaded_vae = comfy.sd.VAE(sd=sd)
self.add_to_cache("vae", vae_name, loaded_vae)
self.eviction_based_on_memory()
return loaded_vae
def load_lora(self, lora_name, model, clip, strength_model, strength_clip):
model_hash = str(model)[44:-1]
clip_hash = str(clip)[25:-1]
        unique_id = f'{model_hash};{clip_hash};{lora_name};{strength_model};{strength_clip}'
        # Look the entry up by the composite key alone: add_to_cache stores LoRAs flat
        # under unique_id, and indexing the defaultdict by lora_name would only create
        # an empty sub-dict, so an extra per-name check could never match.
        if unique_id in self.loaded_objects["lora"]:
            return self.loaded_objects["lora"][unique_id][0]
lora_path = folder_paths.get_full_path("loras", lora_name)
lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
self.add_to_cache("lora", unique_id, (model_lora, clip_lora))
self.eviction_based_on_memory()
return model_lora, clip_lora
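    # The cache key packs everything that affects the patched result; a hypothetical
    # key looks like "<model_hash>;<clip_hash>;my_lora.safetensors;0.8;0.8", so changing
    # either strength value forces a fresh load_lora_for_models call.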
# Sampler
class easySampler:
def __init__(self):
self.last_helds: dict[str, list] = {
"results": [],
"pipe_line": [],
}
@staticmethod
def tensor2pil(image: torch.Tensor) -> Image.Image:
"""Convert a torch tensor to a PIL image."""
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
@staticmethod
def pil2tensor(image: Image.Image) -> torch.Tensor:
"""Convert a PIL image to a torch tensor."""
return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
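    # Informal note on the shape convention (not from the original comments):
    # ComfyUI IMAGE tensors are [batch, height, width, channels] floats in [0, 1];
    # tensor2pil squeezes one batch element down to a uint8 PIL image and
    # pil2tensor adds the batch dimension back.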
@staticmethod
def enforce_mul_of_64(d):
d = int(d)
if d <= 7:
d = 8
        leftover = d % 8  # latent/image sizes must be a multiple of 8
        if leftover != 0:  # d is not a multiple of 8
            if leftover < 4:  # closer to the lower multiple
                d -= leftover  # round down
            else:  # closer to the upper multiple
                d += 8 - leftover  # round up
return int(d)
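    # Worked examples (informal): 511 % 8 == 7, so 511 rounds up to 512;
    # 507 % 8 == 3 (< 4), so 507 rounds down to 504; anything of 7 or less becomes 8.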
@staticmethod
def safe_split(to_split: str, delimiter: str) -> List[str]:
"""Split the input string and return a list of non-empty parts."""
parts = to_split.split(delimiter)
parts = [part for part in parts if part not in ('', ' ', ' ')]
while len(parts) < 2:
parts.append('None')
return parts
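    # Hypothetical examples: the XY-plot axis strings are "node_type: value_type" pairs, so
    #   safe_split("advanced: Steps", ": ")  -> ["advanced", "Steps"]
    #   safe_split("None", ": ")             -> ["None", "None"]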
def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0,
disable_noise=False, start_step=None, last_step=None, force_full_denoise=False,
preview_latent=True, disable_pbar=False):
device = comfy.model_management.get_torch_device()
latent_image = latent["samples"]
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
batch_inds = latent["batch_index"] if "batch_index" in latent else None
noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
noise_mask = None
if "noise_mask" in latent:
noise_mask = latent["noise_mask"]
preview_format = "JPEG"
if preview_format not in ["JPEG", "PNG"]:
preview_format = "JPEG"
previewer = False
if preview_latent:
previewer = latent_preview.get_previewer(device, model.model.latent_format)
pbar = comfy.utils.ProgressBar(steps)
def callback(step, x0, x, total_steps):
preview_bytes = None
if previewer:
preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
pbar.update_absolute(step + 1, total_steps, preview_bytes)
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative,
latent_image,
denoise=denoise, disable_noise=disable_noise, start_step=start_step,
last_step=last_step,
force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback,
disable_pbar=disable_pbar, seed=seed)
out = latent.copy()
out["samples"] = samples
return out
def custom_ksampler(self, model, seed, steps, cfg, _sampler, sigmas, positive, negative, latent,
disable_noise=False, preview_latent=True, disable_pbar=False):
device = comfy.model_management.get_torch_device()
latent_image = latent["samples"]
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
batch_inds = latent["batch_index"] if "batch_index" in latent else None
noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
noise_mask = None
if "noise_mask" in latent:
noise_mask = latent["noise_mask"]
preview_format = "JPEG"
if preview_format not in ["JPEG", "PNG"]:
preview_format = "JPEG"
previewer = False
if preview_latent:
previewer = latent_preview.get_previewer(device, model.model.latent_format)
pbar = comfy.utils.ProgressBar(steps)
def callback(step, x0, x, total_steps):
preview_bytes = None
if previewer:
preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
pbar.update_absolute(step + 1, total_steps, preview_bytes)
samples = comfy.sample.sample_custom(model, noise, cfg, _sampler, sigmas, positive, negative, latent_image,
noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar,
seed=seed)
out = latent.copy()
out["samples"] = samples
return out
def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]:
"""Retrieve value by its associated ID."""
try:
for value, id_ in self.last_helds[key]:
if id_ == my_unique_id:
return value
except KeyError:
return None
def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]:
"""Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist."""
try:
for i, (value, id_) in enumerate(self.last_helds[key]):
if id_ == my_unique_id:
self.last_helds[key][i] = (new_value, id_)
return True
self.last_helds[key].append((new_value, my_unique_id))
return False
except KeyError:
return False
def upscale(self, samples, upscale_method, scale_by, crop):
s = samples.copy()
width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by))
height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by))
if (width > MAX_RESOLUTION):
width = MAX_RESOLUTION
if (height > MAX_RESOLUTION):
height = MAX_RESOLUTION
s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop)
return (s,)
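    # Example of the size math (informal): a 512x512 image corresponds to a 64x64
    # latent; with scale_by=1.5 the target becomes round(64*1.5)=96 latent cells per
    # side, i.e. a 768x768 image, and enforce_mul_of_64 keeps both sides a multiple of 8.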
def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict:
"""Upscale the samples if the upscale_method is not set to 'None'."""
if upscale_method != "None":
samples = self.upscale(samples, upscale_method, factor, crop)[0]
return samples
def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any:
"""Initialize the state by either fetching the stored value or setting a default."""
value = self.get_value_by_id(key, my_unique_id)
if value is not None:
return value
return default
def get_output(self, pipe: dict,) -> Tuple:
"""Return a tuple of various elements fetched from the input pipe dictionary."""
return (
pipe,
pipe.get("images"),
pipe.get("model"),
pipe.get("positive"),
pipe.get("negative"),
pipe.get("samples"),
pipe.get("vae"),
pipe.get("clip"),
pipe.get("seed"),
)
def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple:
"""Return a tuple of various elements fetched from the input sdxl_pipe dictionary."""
return (
sdxl_pipe,
sdxl_pipe.get("model"),
sdxl_pipe.get("positive"),
sdxl_pipe.get("negative"),
sdxl_pipe.get("vae"),
sdxl_pipe.get("refiner_model"),
sdxl_pipe.get("refiner_positive"),
sdxl_pipe.get("refiner_negative"),
sdxl_pipe.get("refiner_vae"),
sdxl_pipe.get("samples"),
sdxl_pipe.get("clip"),
sdxl_pipe.get("images"),
sdxl_pipe.get("seed")
)
# XY plot
class easyXYPlot:
def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id):
self.x_node_type, self.x_type = easySampler.safe_split(xyPlotData.get("x_axis"), ': ')
self.y_node_type, self.y_type = easySampler.safe_split(xyPlotData.get("y_axis"), ': ')
self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else []
self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else []
self.grid_spacing = xyPlotData.get("grid_spacing")
self.latent_id = 0
self.output_individuals = xyPlotData.get("output_individuals")
self.x_label, self.y_label = [], []
self.max_width, self.max_height = 0, 0
self.latents_plot = []
self.image_list = []
self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1
self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1
self.total = self.num_cols * self.num_rows
self.num = 0
self.save_prefix = save_prefix
self.image_output = image_output
self.prompt = prompt
self.extra_pnginfo = extra_pnginfo
self.my_unique_id = my_unique_id
# Helper Functions
@staticmethod
def define_variable(plot_image_vars, value_type, value, index):
plot_image_vars[value_type] = value
if value_type in ["seed", "Seeds++ Batch"]:
value_label = f"{value}"
else:
value_label = f"{value_type}: {value}"
if "ControlNet" in value_type:
if "," in value:
line = value.split(',')
value_label = f"{value_type}: {line[2]}"
if value_type in ["ModelMergeBlocks"]:
if ":" in value:
line = value.split(':')
value_label = f"{line[0]}"
elif len(value) > 16:
value_label = f"ModelMergeBlocks {index + 1}"
else:
value_label = f"MMB: {value}"
if value_type in ["Positive Prompt S/R"]:
value_label = f"pos prompt {index + 1}" if index>0 else f"pos prompt"
if value_type in ["Negative Prompt S/R"]:
value_label = f"neg prompt {index + 1}" if index>0 else f"neg prompt"
if value_type in ["steps", "cfg", "denoise", "clip_skip",
"lora_model_strength", "lora_clip_strength"]:
value_label = f"{value_type}: {value}"
if value_type == "positive":
value_label = f"pos prompt {index + 1}"
elif value_type == "negative":
value_label = f"neg prompt {index + 1}"
return plot_image_vars, value_label
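    # Hypothetical examples of the generated axis labels:
    #   ("steps", 20)                    -> "steps: 20"
    #   ("seed", 123456)                 -> "123456"
    #   ("positive", "a cat", index=1)   -> "pos prompt 2"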
@staticmethod
def get_font(font_size):
return ImageFont.truetype(str(Path(os.path.join(Path(__file__).parent.parent, 'resources/OpenSans-Medium.ttf'))), font_size)
@staticmethod
def update_label(label, value, num_items):
if len(label) < num_items:
return [*label, value]
return label
@staticmethod
def rearrange_tensors(latent, num_cols, num_rows):
new_latent = []
for i in range(num_rows):
for j in range(num_cols):
index = j * num_rows + i
new_latent.append(latent[index])
return new_latent
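    # Index-mapping example (informal): samples are generated x-major, so with
    # num_cols=3 and num_rows=2 the stored order [x0y0, x0y1, x1y0, x1y1, x2y0, x2y1]
    # is reordered to [x0y0, x1y0, x2y0, x0y1, x1y1, x2y1] to match the row-by-row
    # preview grid.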
def calculate_background_dimensions(self):
border_size = int((self.max_width // 8) * 1.5) if self.y_type != "None" or self.x_type != "None" else 0
bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * (
self.y_type != "None")
bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * (
self.x_type != "None")
x_offset_initial = border_size if self.y_type != "None" else 0
y_offset = border_size if self.x_type != "None" else 0
return bg_width, bg_height, x_offset_initial, y_offset
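    # Worked example (informal): with 3 columns, 2 rows, 512x512 tiles, grid_spacing=50
    # and both axes labelled, border_size = int((512 // 8) * 1.5) = 96,
    # bg_width = 3*(512+50) - 50 + 96 = 1732 and bg_height = 2*(512+50) - 50 + 96 = 1170.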
def adjust_font_size(self, text, initial_font_size, label_width):
font = self.get_font(initial_font_size)
text_width, _ = font.getsize(text)
scaling_factor = 0.9
if text_width > (label_width * scaling_factor):
return int(initial_font_size * (label_width / text_width) * scaling_factor)
else:
return initial_font_size
def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10):
label_width = img.width if is_x_label else img.height
# Adjust font size
font_size = self.adjust_font_size(text, initial_font_size, label_width)
font_size = min(max_font_size, font_size) # Ensure font isn't too large
font_size = max(min_font_size, font_size) # Ensure font isn't too small
label_height = int(font_size * 1.5) if is_x_label else font_size
label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 255, 255, 0))
d = ImageDraw.Draw(label_bg)
font = self.get_font(font_size)
# Check if text will fit, if not insert ellipsis and reduce text
if d.textsize(text, font=font)[0] > label_width:
while d.textsize(text + '...', font=font)[0] > label_width and len(text) > 0:
text = text[:-1]
text = text + '...'
# Compute text width and height for multi-line text
text_lines = text.split('\n')
text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines])
max_text_width = max(text_widths)
total_text_height = sum(text_heights)
# Compute position for each line of text
lines_positions = []
current_y = 0
for line, line_width, line_height in zip(text_lines, text_widths, text_heights):
text_x = (label_width - line_width) // 2
text_y = current_y + (label_height - total_text_height) // 2
current_y += line_height
lines_positions.append((line, (text_x, text_y)))
# Draw each line of text
for line, (text_x, text_y) in lines_positions:
d.text((text_x, text_y), line, fill='black', font=font)
return label_bg
def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise,
start_step, last_step, force_full_denoise, x_value=None, y_value=None):
model, clip, vae, positive, negative, seed, steps, cfg = None, None, None, None, None, None, None, None
sampler_name, scheduler, denoise = None, None, None
        # Advanced usage
if plot_image_vars["x_node_type"] == "advanced" or plot_image_vars["y_node_type"] == "advanced":
if self.x_type == "Seeds++ Batch" or self.y_type == "Seeds++ Batch":
seed = int(x_value) if self.x_type == "Seeds++ Batch" else int(y_value)
if self.x_type == "Steps" or self.y_type == "Steps":
steps = int(x_value) if self.x_type == "Steps" else int(y_value)
if self.x_type == "StartStep" or self.y_type == "StartStep":
start_step = int(x_value) if self.x_type == "StartStep" else int(y_value)
if self.x_type == "EndStep" or self.y_type == "EndStep":
last_step = int(x_value) if self.x_type == "EndStep" else int(y_value)
if self.x_type == "CFG Scale" or self.y_type == "CFG Scale":
cfg = float(x_value) if self.x_type == "CFG Scale" else float(y_value)
if self.x_type == "Sampler" or self.y_type == "Sampler" or self.y_type == "Sampler & Scheduler":
sampler_name = float(x_value) if self.x_type == "Sampler" or self.x_type == "Sampler & Scheduler" else float(y_value)
if self.x_type == "Scheduler" or self.y_type == "Scheduler" or self.y_type == "Sampler & Scheduler":
scheduler = float(x_value) if self.x_type == "Scheduler" or self.x_type == "Sampler & Scheduler" else float(y_value)
if self.x_type == "Denoise" or self.y_type == "Denoise":
denoise = float(x_value) if self.x_type == "Denoise" else float(y_value)
            # Model merge
if self.x_type == "ModelMergeBlocks" or self.y_type == "ModelMergeBlocks":
ckpt_name_1, ckpt_name_2 = plot_image_vars['models']
model1, clip1, vae1 = easyCache.load_checkpoint(ckpt_name_1)
model2, clip2, vae2 = easyCache.load_checkpoint(ckpt_name_2)
xy_values = x_value if self.x_type == "ModelMergeBlocks" else y_value
if ":" in xy_values:
xy_line = xy_values.split(':')
xy_values = xy_line[1]
xy_arrs = xy_values.split(',')
# ModelMergeBlocks
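                    # (Informal note) two weight formats are handled here: a short
                    # "input,middle,out" triple, or a full 30-value list covering
                    # time_embed, label_emb, 12 input blocks, 3 middle blocks,
                    # 12 output blocks and the final out. layer.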
if len(xy_arrs) == 3:
input, middle, out = xy_arrs
kwargs = {
"input": input,
"middle": middle,
"out": out
}
elif len(xy_arrs) == 30:
kwargs = {}
kwargs["time_embed."] = xy_arrs[0]
kwargs["label_emb."] = xy_arrs[1]
for i in range(12):
kwargs["input_blocks.{}.".format(i)] = xy_arrs[2+i]
for i in range(3):
kwargs["middle_block.{}.".format(i)] = xy_arrs[14+i]
for i in range(12):
kwargs["output_blocks.{}.".format(i)] = xy_arrs[17+i]
kwargs["out."] = xy_arrs[29]
else:
raise Exception("ModelMergeBlocks weight length error")
default_ratio = next(iter(kwargs.values()))
m = model1.clone()
kp = model2.get_key_patches("diffusion_model.")
for k in kp:
ratio = float(default_ratio)
k_unet = k[len("diffusion_model."):]
last_arg_size = 0
for arg in kwargs:
if k_unet.startswith(arg) and last_arg_size < len(arg):
ratio = float(kwargs[arg])
last_arg_size = len(arg)
m.add_patches({k: kp[k]}, 1.0 - ratio, ratio)
vae_use = plot_image_vars['vae_use']
clip = clip2 if vae_use == 'Use Model 2' else clip1
if vae_use == 'Use Model 2':
vae = vae2
elif vae_use == 'Use Model 1':
vae = vae1
else:
(vae,) = VAELoader().load_vae(vae_use)
model = m
                # If a lora_stack is present, apply its LoRAs on top
optional_lora_stack = plot_image_vars['lora_stack']
if optional_lora_stack is not None and optional_lora_stack != []:
for lora in optional_lora_stack:
lora_name = lora["lora_name"]
model = model if model is not None else lora["model"]
clip = clip if clip is not None else lora["clip"]
lora_model_strength = lora["lora_model_strength"]
lora_clip_strength = lora["lora_clip_strength"]
if "lbw" in lora:
lbw = lora["lbw"]
lbw_a = lora["lbw_a"]
lbw_b = lora["lbw_b"]
cls = ALL_NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']
model, clip, _ = cls().doit(model, clip, lora_name, lora_model_strength, lora_clip_strength, False, 0,
lbw_a, lbw_b, "", lbw)
model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength)
                # Handle CLIP
clip = clip.clone()
if plot_image_vars['clip_skip'] != 0:
clip.clip_layer(plot_image_vars['clip_skip'])
            # Prompts
if "Positive" in self.x_type or "Positive" in self.y_type:
if self.x_type == 'Positive Prompt S/R' or self.y_type == 'Positive Prompt S/R':
positive = x_value if self.x_type == "Positive Prompt S/R" else y_value
if plot_image_vars['a1111_prompt_style']:
if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS:
cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode']
steps = plot_image_vars['steps']
clip = clip if clip is not None else plot_image_vars["clip"]
positive, = cls().encode(clip, positive, "A1111", True, True, False, False, 6,
1024, 1024, 0, 0, 1024, 1024, '', '', steps)
else:
raise Exception(
f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'")
else:
clip = clip if clip is not None else plot_image_vars["clip"]
positive, positive_pooled = advanced_encode(clip, positive,
plot_image_vars['positive_token_normalization'],
plot_image_vars[
'positive_weight_interpretation'],
w_max=1.0,
apply_to_pooled="enable")
positive = [[positive, {"pooled_output": positive_pooled}]]
if "Negative" in self.x_type or "Negative" in self.y_type:
if self.x_type == 'Negative Prompt S/R' or self.y_type == 'Negative Prompt S/R':
negative = x_value if self.x_type == "Negative Prompt S/R" else y_value
if plot_image_vars['a1111_prompt_style']:
if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS:
cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode']
steps = plot_image_vars['steps']
clip = clip if clip is not None else plot_image_vars["clip"]
negative, = cls().encode(clip, negative, "A1111", True, True, False, False, 6,
1024, 1024, 0, 0, 1024, 1024, '', '', steps)
else:
raise Exception(
f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'")
else:
clip = clip if clip is not None else plot_image_vars["clip"]
negative, negative_pooled = advanced_encode(clip, negative,
plot_image_vars['negative_token_normalization'],
plot_image_vars[
'negative_weight_interpretation'],
w_max=1.0,
apply_to_pooled="enable")
negative = [[negative, {"pooled_output": negative_pooled}]]
# ControlNet
if "ControlNet" in self.x_type or "ControlNet" in self.y_type:
_pipe = {
"model": model if model is not None else plot_image_vars["model"],
"positive": positive if positive is not None else plot_image_vars["positive_cond"],
"negative": negative if negative is not None else plot_image_vars["negative_cond"],
"vae": vae if vae is not None else plot_image_vars['vae'],
"clip": clip if clip is not None else plot_image_vars['clip'],
"samples": None,
"images": None,
"loader_settings": {}
}
cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None
if cnet:
strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',')
strength = float(strength)
start_percent = float(start_percent)
end_percent = float(end_percent)
for index, item in enumerate(cnet):
control_net_names = item[0]
image = item[1]
for idx, control_net_name in enumerate(control_net_names):
# print(control_net_name)
_pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent,
end_percent)
positive = _pipe['positive']
negative = _pipe['negative']
del _pipe
        # Simple usage
if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader":
model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name'])
if plot_image_vars['lora_name'] != "None":
model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip,
plot_image_vars['lora_model_strength'],
plot_image_vars['lora_clip_strength'])
# Check for custom VAE
if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]:
vae = easyCache.load_vae(plot_image_vars['vae_name'])
# CLIP skip
if not clip:
raise Exception("No CLIP found")
clip = clip.clone()
clip.clip_layer(plot_image_vars['clip_skip'])
if plot_image_vars['a1111_prompt_style']:
if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS:
cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode']
steps = plot_image_vars['steps']
positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps)
negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps)
else:
raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'")
else:
positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'],
plot_image_vars['positive_token_normalization'],
plot_image_vars['positive_weight_interpretation'], w_max=1.0,
apply_to_pooled="enable")
positive = [[positive, {"pooled_output": positive_pooled}]]
negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'],
plot_image_vars['negative_token_normalization'],
plot_image_vars['negative_weight_interpretation'], w_max=1.0,
apply_to_pooled="enable")
negative = [[negative, {"pooled_output": negative_pooled}]]
model = model if model is not None else plot_image_vars["model"]
clip = clip if clip is not None else plot_image_vars["clip"]
vae = vae if vae is not None else plot_image_vars["vae"]
positive = positive if positive is not None else plot_image_vars["positive_cond"]
negative = negative if negative is not None else plot_image_vars["negative_cond"]
seed = seed if seed is not None else plot_image_vars["seed"]
steps = steps if steps is not None else plot_image_vars["steps"]
cfg = cfg if cfg is not None else plot_image_vars["cfg"]
sampler_name = sampler_name if sampler_name is not None else plot_image_vars["sampler_name"]
scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"]
denoise = denoise if denoise is not None else plot_image_vars["denoise"]
# Sample
samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples,
denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent,
start_step=start_step, last_step=last_step,
force_full_denoise=force_full_denoise)
# Decode images and store
latent = samples["samples"]
# Add the latent tensor to the tensors list
latents_plot.append(latent)
# Decode the image
image = vae.decode(latent).cpu()
if self.output_individuals in [True, "True"]:
easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo)
easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num)
# Convert the image from tensor to PIL Image and add it to the list
pil_image = easySampler.tensor2pil(image)
image_list.append(pil_image)
# Update max dimensions
self.max_width = max(self.max_width, pil_image.width)
self.max_height = max(self.max_height, pil_image.height)
# Return the touched variables
return image_list, self.max_width, self.max_height, latents_plot
# Process Functions
def validate_xy_plot(self):
if self.x_type == 'None' and self.y_type == 'None':
log_node_warn(f'easyKsampler[{self.my_unique_id}]','No Valid Plot Types - Reverting to default sampling...')
return False
else:
return True
def get_latent(self, samples):
# Extract the 'samples' tensor from the dictionary
latent_image_tensor = samples["samples"]
# Split the tensor into individual image tensors
image_tensors = torch.split(latent_image_tensor, 1, dim=0)
# Create a list of dictionaries containing the individual image tensors
latent_list = [{'samples': image} for image in image_tensors]
# Set latent only to the first latent of batch
if self.latent_id >= len(latent_list):
log_node_warn(f'easy kSampler[{self.my_unique_id}]',f'The selected latent_id ({self.latent_id}) is out of range.')
log_node_warn(f'easy kSampler[{self.my_unique_id}]', f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).')
self.latent_id = len(latent_list) - 1
return latent_list[self.latent_id]
def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, last_step,
force_full_denoise, disable_noise):
for x_index, x_value in enumerate(self.x_values):
plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value,
x_index)
self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values))
if self.y_type != 'None':
for y_index, y_value in enumerate(self.y_values):
plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value,
y_index)
self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values))
# ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t(
# f'Plot Values {self.num}/{self.total} ->').p()
self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image(
plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list,
disable_noise, start_step, last_step, force_full_denoise, x_value, y_value)
self.num += 1
else:
# ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p()
self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image(
plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise,
start_step, last_step, force_full_denoise, x_value)
self.num += 1
# Rearrange latent array to match preview image grid
self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows)
# Concatenate the tensors along the first dimension (dim=0)
self.latents_plot = torch.cat(self.latents_plot, dim=0)
return self.latents_plot
def plot_images_and_labels(self):
# Calculate the background dimensions
bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions()
# Create the white background image
background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255))
output_image = []
for row_index in range(self.num_rows):
x_offset = x_offset_initial
for col_index in range(self.num_cols):
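# image_list is filled with the y loop nested inside the x loop (see get_labels_and_sample),
# so col_index * self.num_rows + row_index selects the image for this (x, y) grid cell.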
index = col_index * self.num_rows + row_index
img = self.image_list[index]
output_image.append(sampler.pil2tensor(img))
background.paste(img, (x_offset, y_offset))
# Handle X label
if row_index == 0 and self.x_type != "None":
label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512))
label_y = (y_offset - label_bg.height) // 2
background.alpha_composite(label_bg, (x_offset, label_y))
# Handle Y label
if col_index == 0 and self.y_type != "None":
label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False)
label_bg = label_bg.rotate(90, expand=True)
label_x = (x_offset - label_bg.width) // 2
label_y = y_offset + (img.height - label_bg.height) // 2
background.alpha_composite(label_bg, (label_x, label_y))
x_offset += img.width + self.grid_spacing
y_offset += img.height + self.grid_spacing
return (sampler.pil2tensor(background), output_image)
easyCache = easyLoader()
sampler = easySampler()
def check_link_to_clip(node_id, clip_id, visited=None, node=None):
"""Check if a given node links directly or indirectly to a loader node."""
if visited is None:
visited = set()
if node_id in visited:
return False
visited.add(node_id)
if "pipe" in node["inputs"]:
link_ids = node["inputs"]["pipe"]
for id in link_ids:
if id != 0 and id == str(clip_id):
return True
return False
def find_nearest_steps(clip_id, prompt):
"""Find the nearest KSampler or preSampling node that references the given id."""
for id in prompt:
node = prompt[id]
if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]:
# Check if this KSampler node directly or indirectly references the given CLIPTextEncode node
if check_link_to_clip(id, clip_id, None, node):
steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1
return steps
return 1
def find_wildcards_seed(text, prompt):
if "__" in text:
for i in prompt:
if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']:
return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None
else:
return None
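# find_wildcards_seed example: for a text containing "__hair__", this returns the seed_num of the
# wildcards node in the prompt whose text matches, or None when no such node is found.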
class easySave:
def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False,
output_dir=folder_paths.get_temp_directory()):
self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None
self.overwrite_existing = overwrite_existing
self.my_unique_id = my_unique_id
self.prompt = prompt
self.extra_pnginfo = extra_pnginfo
self.type = 'temp'
self.output_dir = output_dir
if self.output_dir != folder_paths.get_temp_directory():
self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id)
if not os.path.exists(self.output_dir):
self._create_directory(self.output_dir)
@staticmethod
def _create_directory(folder: str):
"""Try to create the directory and log the status."""
log_node_warn("", f"Folder {folder} does not exist. Attempting to create...")
if not os.path.exists(folder):
try:
os.makedirs(folder)
log_node_success("",f"{folder} Created Successfully")
except OSError:
log_node_error(f"Failed to create folder {folder}")
pass
@staticmethod
def _map_filename(filename: str, filename_prefix: str) -> Tuple[int, str, Optional[int]]:
"""Utility function to map filename to its parts."""
# Get the prefix length and extract the prefix
prefix_len = len(os.path.basename(filename_prefix))
prefix = filename[:prefix_len]
# Search for the primary digits
digits = re.search(r'(\d+)', filename[prefix_len:])
# Search for the number in brackets after the primary digits
group_id = re.search(r'\((\d+)\)', filename[prefix_len:])
return (int(digits.group()) if digits else 0, prefix, int(group_id.group(1)) if group_id else 0)
@staticmethod
def _format_date(text: str, date: datetime.datetime) -> str:
"""Format the date according to specific patterns."""
date_formats = {
'd': lambda d: d.day,
'dd': lambda d: '{:02d}'.format(d.day),
'M': lambda d: d.month,
'MM': lambda d: '{:02d}'.format(d.month),
'h': lambda d: d.hour,
'hh': lambda d: '{:02d}'.format(d.hour),
'm': lambda d: d.minute,
'mm': lambda d: '{:02d}'.format(d.minute),
's': lambda d: d.second,
'ss': lambda d: '{:02d}'.format(d.second),
'y': lambda d: d.year,
'yy': lambda d: str(d.year)[2:],
'yyy': lambda d: str(d.year)[1:],
'yyyy': lambda d: d.year,
}
# We need to sort the keys in reverse order to ensure we match the longest formats first
for format_str in sorted(date_formats.keys(), key=len, reverse=True):
if format_str in text:
text = text.replace(format_str, str(date_formats[format_str](date)))
return text
@staticmethod
def _gather_all_inputs(prompt: Dict[str, dict], unique_id: str, linkInput: str = '',
collected_inputs: Optional[Dict[str, Union[str, List[str]]]] = None) -> Dict[
str, Union[str, List[str]]]:
"""Recursively gather all inputs from the prompt dictionary."""
if prompt is None:
return None
collected_inputs = collected_inputs or {}
prompt_inputs = prompt[str(unique_id)]["inputs"]
for p_input, p_input_value in prompt_inputs.items():
a_input = f"{linkInput}>{p_input}" if linkInput else p_input
if isinstance(p_input_value, list):
easySave._gather_all_inputs(prompt, p_input_value[0], a_input, collected_inputs)
else:
existing_value = collected_inputs.get(a_input)
if existing_value is None:
collected_inputs[a_input] = p_input_value
elif p_input_value not in existing_value:
collected_inputs[a_input] = existing_value + "; " + p_input_value
# if "text" in collected_inputs:
# del collected_inputs['text']
# print(collected_inputs)
return collected_inputs
@staticmethod
def _get_filename_with_padding(output_dir, filename, number_padding, group_id, ext):
"""Return filename with proper padding."""
try:
filtered = list(filter(lambda a: a[1] == filename,
map(lambda x: easySave._map_filename(x, filename), os.listdir(output_dir))))
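# Take the highest counter already used for this prefix and bump it only when that file
# belongs to the current group (or to no group), so numbering continues per group.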
last = max(filtered)[0]
for f in filtered:
if f[0] == last:
if f[2] == 0 or f[2] == group_id:
last += 1
counter = last
except (ValueError, FileNotFoundError):
os.makedirs(output_dir, exist_ok=True)
counter = 1
if group_id == 0:
return f"{filename}.{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}.{ext}"
else:
return f"{filename}_({group_id}).{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}_({group_id}).{ext}"
@staticmethod
def filename_parser(output_dir: str, filename_prefix: str, prompt: Dict[str, dict], my_unique_id: str,
number_padding: int, group_id: int, ext: str) -> str:
"""Parse the filename using provided patterns and replace them with actual values."""
subfolder = os.path.dirname(os.path.normpath(filename_prefix))
filename = os.path.basename(os.path.normpath(filename_prefix))
filename = re.sub(r'%date:(.*?)%', lambda m: easySave._format_date(m.group(1), datetime.datetime.now()),
filename_prefix)
all_inputs = easySave._gather_all_inputs(prompt, my_unique_id)
filename = re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), filename)
filename = re.sub(r'[/\\]+', '-', filename)
filename = easySave._get_filename_with_padding(output_dir, filename, number_padding, group_id, ext)
return filename, subfolder
@staticmethod
def folder_parser(output_dir: str, prompt: Dict[str, dict], my_unique_id: str):
output_dir = re.sub(r'%date:(.*?)%', lambda m: easySave._format_date(m.group(1), datetime.datetime.now()),
output_dir)
all_inputs = easySave._gather_all_inputs(prompt, my_unique_id)
return re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), output_dir)
def images(self, images, filename_prefix, output_type, embed_workflow=True, ext="png", group_id=0):
FORMAT_MAP = {
"png": "PNG",
"jpg": "JPEG",
"jpeg": "JPEG",
"bmp": "BMP",
"tif": "TIFF",
"tiff": "TIFF"
}
if ext not in FORMAT_MAP:
raise ValueError(f"Unsupported file extension {ext}")
if output_type == "Hide":
return list()
if output_type in ("Save", "Hide/Save", "Sender/Save"):
output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory()
self.type = "output"
if output_type in ("Preview", "Sender"):
output_dir = self.output_dir
filename_prefix = 'easyPreview'
results = list()
for image in images:
img = Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8))
filename = filename_prefix.replace("%width%", str(img.size[0])).replace("%height%", str(img.size[1]))
filename, subfolder = easySave.filename_parser(output_dir, filename, self.prompt, self.my_unique_id,
self.number_padding, group_id, ext)
file_path = os.path.join(output_dir, filename)
if ext == "png" and embed_workflow in (True, "True"):
metadata = PngInfo()
if self.prompt is not None:
metadata.add_text("prompt", json.dumps(self.prompt))
if hasattr(self, 'extra_pnginfo') and self.extra_pnginfo is not None:
for key, value in self.extra_pnginfo.items():
metadata.add_text(key, json.dumps(value))
if self.overwrite_existing or not os.path.isfile(file_path):
img.save(file_path, pnginfo=metadata, format=FORMAT_MAP[ext])
else:
if self.overwrite_existing or not os.path.isfile(file_path):
img.save(file_path, format=FORMAT_MAP[ext])
else:
log_node_error("",f"File {file_path} already exists... Skipping")
results.append({
"filename": filename,
"subfolder": subfolder,
"type": self.type
})
return results
def textfile(self, text, filename_prefix, output_type, group_id=0, ext='txt'):
if output_type == "Hide":
return []
if output_type in ("Save", "Hide/Save"):
output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory()
if output_type == "Preview":
filename_prefix = 'easyPreview'
filename = easySave.filename_parser(output_dir, filename_prefix, self.prompt, self.my_unique_id,
self.number_padding, group_id, ext)
file_path = os.path.join(output_dir, filename)
if self.overwrite_existing or not os.path.isfile(file_path):
with open(file_path, 'w') as f:
f.write(text)
else:
log_node_error("", f"File {file_path} already exists... Skipping")
# ---------------------------------------------------------------Prompts: Start----------------------------------------------------------------------#
# Positive prompt
class positivePrompt:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(s):
return {"required": {
"positive": ("STRING", {"default": "", "multiline": True, "placeholder": "Positive"}),}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("positive",)
FUNCTION = "main"
CATEGORY = "EasyUse/Prompt"
@staticmethod
def main(positive):
return positive,
# Wildcard prompt
class wildcardsPrompt:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(s):
wildcard_list = get_wildcard_list()
return {"required": {
"text": ("STRING", {"default": "", "multiline": True, "dynamicPrompts": False, "placeholder": "(Support Lora Block Weight and wildcard)"}),
"Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
"Select to add Wildcard": (["Select the Wildcard to add to the text"] + wildcard_list,),
"seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("text",)
OUTPUT_NODE = True
FUNCTION = "main"
CATEGORY = "EasyUse/Prompt"
@staticmethod
def main(*args, **kwargs):
my_unique_id = kwargs["my_unique_id"]
extra_pnginfo = kwargs["extra_pnginfo"]
prompt = kwargs["prompt"]
seed_num = kwargs["seed_num"]
# Clean loaded_objects
easyCache.update_loaded_objects(prompt)
my_unique_id = int(my_unique_id)
easy_save = easySave(my_unique_id, prompt, extra_pnginfo)
# if my_unique_id:
# workflow = extra_pnginfo["workflow"]
# node = next((x for x in workflow["nodes"] if str(x["id"]) == my_unique_id), None)
# if node:
# seed_num = prompt[my_unique_id]['inputs']['seed_num'] if 'seed_num' in prompt[my_unique_id][
# 'inputs'] else 0
# length = len(node["widgets_values"])
# node["widgets_values"][length - 2] = seed_num
text = kwargs['text']
return {"ui": {"value": [seed_num]}, "result": (text,)}
# Negative prompt
class negativePrompt:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(s):
return {"required": {
"negative": ("STRING", {"default": "", "multiline": True, "placeholder": "Negative"}),}
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("negative",)
FUNCTION = "main"
CATEGORY = "EasyUse/Prompt"
@staticmethod
def main(negative):
return negative,
# Portrait Master
# Created by AI Wiz Art (Stefano Flore)
# Version: 2.2
# https://stefanoflore.it
# https://ai-wiz.art
class portraitMaster:
@classmethod
def INPUT_TYPES(s):
max_float_value = 1.95
prompt_path = Path(os.path.join(Path(__file__).parent.parent, 'resources/portrait_prompt.json'))
if not os.path.exists(prompt_path):
response = urlopen('https://raw.githubusercontent.com/yolain/ComfyUI-Easy-Use/main/resources/portrait_prompt.json')
temp_prompt = json.loads(response.read())
prompt_serialized = json.dumps(temp_prompt, indent=4)
with open(prompt_path, "w") as f:
f.write(prompt_serialized)
del response, temp_prompt
# Load local
with open(prompt_path, 'r') as f:
prompt_lists = json.load(f)
keys = [
['shot', 'COMBO', {"key": "shot_list"}], ['shot_weight', 'FLOAT'],
['gender', 'COMBO', {"default": "Woman", "key": "gender_list"}], ['age', 'INT', {"default": 30, "min": 18, "max": 90, "step": 1, "display": "slider"}],
['nationality_1', 'COMBO', {"default": "Chinese", "key": "nationality_list"}], ['nationality_2', 'COMBO', {"key": "nationality_list"}], ['nationality_mix', 'FLOAT'],
['body_type', 'COMBO', {"key": "body_type_list"}], ['body_type_weight', 'FLOAT'], ['model_pose', 'COMBO', {"key": "model_pose_list"}], ['eyes_color', 'COMBO', {"key": "eyes_color_list"}],
['facial_expression', 'COMBO', {"key": "face_expression_list"}], ['facial_expression_weight', 'FLOAT'], ['face_shape', 'COMBO', {"key": "face_shape_list"}], ['face_shape_weight', 'FLOAT'], ['facial_asymmetry', 'FLOAT'],
['hair_style', 'COMBO', {"key": "hair_style_list"}], ['hair_color', 'COMBO', {"key": "hair_color_list"}], ['disheveled', 'FLOAT'], ['beard', 'COMBO', {"key": "beard_list"}],
['skin_details', 'FLOAT'], ['skin_pores', 'FLOAT'], ['dimples', 'FLOAT'], ['freckles', 'FLOAT'],
['moles', 'FLOAT'], ['skin_imperfections', 'FLOAT'], ['skin_acne', 'FLOAT'], ['tanned_skin', 'FLOAT'],
['eyes_details', 'FLOAT'], ['iris_details', 'FLOAT'], ['circular_iris', 'FLOAT'], ['circular_pupil', 'FLOAT'],
['light_type', 'COMBO', {"key": "light_type_list"}], ['light_direction', 'COMBO', {"key": "light_direction_list"}], ['light_weight', 'FLOAT']
]
widgets = {}
for i, obj in enumerate(keys):
if obj[1] == 'COMBO':
key = obj[2]['key'] if obj[2] and 'key' in obj[2] else obj[0]
_list = prompt_lists[key].copy()
_list.insert(0, '-')
widgets[obj[0]] = (_list, {**obj[2]})
elif obj[1] == 'FLOAT':
widgets[obj[0]] = ("FLOAT", {"default": 0, "step": 0.05, "min": 0, "max": max_float_value, "display": "slider",})
elif obj[1] == 'INT':
widgets[obj[0]] = (obj[1], obj[2])
del prompt_lists
return {
"required": {
**widgets,
"photorealism_improvement": (["enable", "disable"],),
"prompt_start": ("STRING", {"multiline": True, "default": "raw photo, (realistic:1.5)"}),
"prompt_additional": ("STRING", {"multiline": True, "default": ""}),
"prompt_end": ("STRING", {"multiline": True, "default": ""}),
"negative_prompt": ("STRING", {"multiline": True, "default": ""}),
}
}
RETURN_TYPES = ("STRING", "STRING",)
RETURN_NAMES = ("positive", "negative",)
FUNCTION = "pm"
CATEGORY = "EasyUse/Prompt"
def pm(self, shot="-", shot_weight=1, gender="-", body_type="-", body_type_weight=0, eyes_color="-",
facial_expression="-", facial_expression_weight=0, face_shape="-", face_shape_weight=0,
nationality_1="-", nationality_2="-", nationality_mix=0.5, age=30, hair_style="-", hair_color="-",
disheveled=0, dimples=0, freckles=0, skin_pores=0, skin_details=0, moles=0, skin_imperfections=0,
wrinkles=0, tanned_skin=0, eyes_details=1, iris_details=1, circular_iris=1, circular_pupil=1,
facial_asymmetry=0, prompt_additional="", prompt_start="", prompt_end="", light_type="-",
light_direction="-", light_weight=0, negative_prompt="", photorealism_improvement="disable", beard="-",
model_pose="-", skin_acne=0):
prompt = []
if gender == "-":
gender = ""
else:
if age <= 25 and gender == 'Woman':
gender = 'girl'
if age <= 25 and gender == 'Man':
gender = 'boy'
gender = " " + gender + " "
if nationality_1 != '-' and nationality_2 != '-':
nationality = f"[{nationality_1}:{nationality_2}:{round(nationality_mix, 2)}]"
elif nationality_1 != '-':
nationality = nationality_1 + " "
elif nationality_2 != '-':
nationality = nationality_2 + " "
else:
nationality = ""
if prompt_start != "":
prompt.append(f"{prompt_start}")
if shot != "-" and shot_weight > 0:
prompt.append(f"({shot}:{round(shot_weight, 2)})")
prompt.append(f"({nationality}{gender}{round(age)}-years-old:1.5)")
if body_type != "-" and body_type_weight > 0:
prompt.append(f"({body_type}, {body_type} body:{round(body_type_weight, 2)})")
if model_pose != "-":
prompt.append(f"({model_pose}:1.5)")
if eyes_color != "-":
prompt.append(f"({eyes_color} eyes:1.25)")
if facial_expression != "-" and facial_expression_weight > 0:
prompt.append(
f"({facial_expression}, {facial_expression} expression:{round(facial_expression_weight, 2)})")
if face_shape != "-" and face_shape_weight > 0:
prompt.append(f"({face_shape} shape face:{round(face_shape_weight, 2)})")
if hair_style != "-":
prompt.append(f"({hair_style} hairstyle:1.25)")
if hair_color != "-":
prompt.append(f"({hair_color} hair:1.25)")
if beard != "-":
prompt.append(f"({beard}:1.15)")
if disheveled != "-" and disheveled > 0:
prompt.append(f"(disheveled:{round(disheveled, 2)})")
if prompt_additional != "":
prompt.append(f"{prompt_additional}")
if skin_details > 0:
prompt.append(f"(skin details, skin texture:{round(skin_details, 2)})")
if skin_pores > 0:
prompt.append(f"(skin pores:{round(skin_pores, 2)})")
if skin_imperfections > 0:
prompt.append(f"(skin imperfections:{round(skin_imperfections, 2)})")
if skin_acne > 0:
prompt.append(f"(acne, skin with acne:{round(skin_acne, 2)})")
if wrinkles > 0:
prompt.append(f"(skin imperfections:{round(wrinkles, 2)})")
if tanned_skin > 0:
prompt.append(f"(tanned skin:{round(tanned_skin, 2)})")
if dimples > 0:
prompt.append(f"(dimples:{round(dimples, 2)})")
if freckles > 0:
prompt.append(f"(freckles:{round(freckles, 2)})")
if moles > 0:
prompt.append(f"(skin pores:{round(moles, 2)})")
if eyes_details > 0:
prompt.append(f"(eyes details:{round(eyes_details, 2)})")
if iris_details > 0:
prompt.append(f"(iris details:{round(iris_details, 2)})")
if circular_iris > 0:
prompt.append(f"(circular iris:{round(circular_iris, 2)})")
if circular_pupil > 0:
prompt.append(f"(circular pupil:{round(circular_pupil, 2)})")
if facial_asymmetry > 0:
prompt.append(f"(facial asymmetry, face asymmetry:{round(facial_asymmetry, 2)})")
if light_type != '-' and light_weight > 0:
if light_direction != '-':
prompt.append(f"({light_type} {light_direction}:{round(light_weight, 2)})")
else:
prompt.append(f"({light_type}:{round(light_weight, 2)})")
if prompt_end != "":
prompt.append(f"{prompt_end}")
prompt = ", ".join(prompt)
prompt = prompt.lower()
if photorealism_improvement == "enable":
prompt = prompt + ", (professional photo, balanced photo, balanced exposure:1.2), (film grain:1.15)"
if photorealism_improvement == "enable":
negative_prompt = negative_prompt + ", (shinny skin, reflections on the skin, skin reflections:1.25)"
log_node_info("Portrait Master as generate the prompt:", prompt)
return (prompt, negative_prompt,)
# Multiply latent by sigma
class latentMultiplyBySigma:
@classmethod
def INPUT_TYPES(s):
return {"required": {
"sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
"steps": ("INT", {"default": 10000, "min": 0, "max": 10000}),
"start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
"end_at_step": ("INT", {"default": 10000, "min": 1, "max": 10000}),
},
"optional": {
"pipe": ("PIPE_LINE",),
"optional_model": ("MODEL",),
"optional_latent": ("LATENT",)
}}
RETURN_TYPES = ("PIPE_LINE", "LATENT", "FLOAT",)
RETURN_NAMES = ("pipe", "latent", "sigma",)
FUNCTION = "run"
CATEGORY = "EasyUse/Latent"
def run(self, sampler_name, scheduler, steps, start_at_step, end_at_step, pipe=None, optional_model=None, optional_latent=None):
model = optional_model if optional_model is not None else pipe["model"]
samples = optional_latent if optional_latent is not None else pipe["samples"]
device = comfy.model_management.get_torch_device()
end_at_step = min(steps, end_at_step)
start_at_step = min(start_at_step, end_at_step)
real_model = None
comfy.model_management.load_model_gpu(model)
real_model = model.model
sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name,
scheduler=scheduler, denoise=1.0, model_options=model.model_options)
sigmas = sampler.sigmas
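# The gap between the two sigmas is the noise the sampler would remove between start_at_step
# and end_at_step; dividing by the latent scale factor expresses it in latent units.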
sigma = sigmas[start_at_step] - sigmas[end_at_step]
sigma /= model.model.latent_format.scale_factor
sigma = sigma.cpu().numpy()
samples_out = samples.copy()
s1 = samples["samples"]
samples_out["samples"] = s1 * sigma
if pipe is None:
pipe = {}
new_pipe = {
**pipe,
"samples": samples_out
}
del pipe
return (new_pipe, samples_out, sigma)
# Latent composite masked (with conditioning)
class latentCompositeMaskedWithCond:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"pipe": ("PIPE_LINE",),
"text_combine": ("STRING", {"default": ""}),
"source_latent": ("LATENT",),
"source_mask": ("MASK",),
"new_mask": ("MASK",),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"},
}
RETURN_TYPES = ("PIPE_LINE", "LATENT", "CONDITIONING")
RETURN_NAMES = ("pipe", "latent", "conditioning",)
FUNCTION = "run"
OUTPUT_MODE = True
CATEGORY = "EasyUse/Latent"
def run(self, pipe, text_combine, source_latent, source_mask, new_mask, prompt=None, extra_pnginfo=None, my_unique_id=None):
clip = pipe["clip"]
destination_latent = pipe["samples"]
positive = pipe["loader_settings"]["positive"] + ',' + text_combine
positive_token_normalization = pipe["loader_settings"]["positive_token_normalization"]
positive_weight_interpretation = pipe["loader_settings"]["positive_weight_interpretation"]
a1111_prompt_style = pipe["loader_settings"]["a1111_prompt_style"]
positive_cond = pipe["positive"]
log_node_warn("正在处理提示词编码...")
# Use new clip text encode by smzNodes like same as webui, when if you installed the smzNodes
if a1111_prompt_style:
if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS:
cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode']
steps = pipe["steps"]
positive_embeddings_final, = cls().encode(clip, positive, "A1111", True, True, False, False, 6, 1024,
1024, 0, 0, 1024, 1024, '', '', steps)
else:
raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'")
else:
positive_embeddings_final, positive_pooled = advanced_encode(clip, positive,
positive_token_normalization,
positive_weight_interpretation, w_max=1.0,
apply_to_pooled='enable')
positive_embeddings_final = [[positive_embeddings_final, {"pooled_output": positive_pooled}]]
# source cond
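# Restrict the original pipe conditioning to source_mask and the newly encoded
# (positive + text_combine) conditioning to new_mask, then merge the two region-limited conds.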
(cond_1,) = ConditioningSetMask().append(positive_cond, source_mask, "default", 1)
(cond_2,) = ConditioningSetMask().append(positive_embeddings_final, new_mask, "default", 1)
positive_cond = cond_1 + cond_2
# latent composite masked
(samples,) = LatentCompositeMasked().composite(destination_latent, source_latent, 0, 0, False)
new_pipe = {
**pipe,
"positive": positive_cond,
"samples": samples,
"loader_settings": {
**pipe["loader_settings"],
"positive": positive,
}
}
del pipe
return (new_pipe, samples, positive_cond)
# Random seed
class easySeed:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"},
}
RETURN_TYPES = ("INT",)
RETURN_NAMES = ("seed_num",)
FUNCTION = "doit"
CATEGORY = "EasyUse/Seed"
OUTPUT_NODE = True
def doit(self, seed_num=0, prompt=None, extra_pnginfo=None, my_unique_id=None):
return seed_num,
# Global seed
class globalSeed:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"value": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}),
"mode": ("BOOLEAN", {"default": True, "label_on": "control_before_generate", "label_off": "control_after_generate"}),
"action": (["fixed", "increment", "decrement", "randomize",
"increment for each node", "decrement for each node", "randomize for each node"], ),
"last_seed": ("STRING", {"default": ""}),
}
}
RETURN_TYPES = ()
FUNCTION = "doit"
CATEGORY = "EasyUse/Seed"
OUTPUT_NODE = True
def doit(self, **kwargs):
return {}
#---------------------------------------------------------------Prompts: End------------------------------------------------------------------------#
#---------------------------------------------------------------Loaders: Start----------------------------------------------------------------------#
# Easy full loader
class fullLoader:
@classmethod
def INPUT_TYPES(cls):
resolution_strings = [f"{width} x {height}" for width, height in BASE_RESOLUTIONS]
a1111_prompt_style_default = False
return {"required": {
"ckpt_name": (folder_paths.get_filename_list("checkpoints"),),
"config_name": (["Default", ] + folder_paths.get_filename_list("configs"), {"default": "Default"}),
"vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),),
"clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}),
"lora_name": (["None"] + folder_paths.get_filename_list("loras"),),
"lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
"lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
"resolution": (resolution_strings,),
"empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"positive": ("STRING", {"default": "Positive", "multiline": True}),
"positive_token_normalization": (["none", "mean", "length", "length+mean"],),
"positive_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),
"negative": ("STRING", {"default": "Negative", "multiline": True}),
"negative_token_normalization": (["none", "mean", "length", "length+mean"],),
"negative_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
},
"optional": {"model_override": ("MODEL",), "clip_override": ("CLIP",), "vae_override": ("VAE",), "optional_lora_stack": ("LORA_STACK",), "a1111_prompt_style": ("BOOLEAN", {"default": a1111_prompt_style_default}),},
"hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}
}
RETURN_TYPES = ("PIPE_LINE", "MODEL", "VAE", "CLIP")
RETURN_NAMES = ("pipe", "model", "vae", "clip")
FUNCTION = "adv_pipeloader"
CATEGORY = "EasyUse/Loaders"
def adv_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip,
lora_name, lora_model_strength, lora_clip_strength,
resolution, empty_latent_width, empty_latent_height,
positive, positive_token_normalization, positive_weight_interpretation,
negative, negative_token_normalization, negative_weight_interpretation,
batch_size, model_override=None, clip_override=None, vae_override=None, optional_lora_stack=None, a1111_prompt_style=False, prompt=None,
my_unique_id=None
):
model: ModelPatcher | None = None
clip: CLIP | None = None
vae: VAE | None = None
can_load_lora = True
pipe_lora_stack = []
# resolution
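# Note: "自定义" ("custom") is kept as-is because it must match the literal entry in BASE_RESOLUTIONS.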
if resolution != "自定义 x 自定义":
try:
width, height = map(int, resolution.split(' x '))
empty_latent_width = width
empty_latent_height = height
except ValueError:
raise ValueError("Invalid base_resolution format.")
# Create Empty Latent
latent = torch.zeros([batch_size, 4, empty_latent_height // 8, empty_latent_width // 8]).cpu()
samples = {"samples": latent}
# Clean models from loaded_objects
easyCache.update_loaded_objects(prompt)
log_node_warn("正在处理模型...")
# 判断是否存在 模型叠加xyplot, 若存在优先缓存第一个模型
xyinputs_id = next((x for x in prompt if str(prompt[x]["class_type"]) == "easy XYInputs: ModelMergeBlocks"), None)
if xyinputs_id is not None:
node = prompt[xyinputs_id]
if "ckpt_name_1" in node["inputs"]:
ckpt_name_1 = node["inputs"]["ckpt_name_1"]
model, clip, vae = easyCache.load_checkpoint(ckpt_name_1)
can_load_lora = False
# Load models
elif model_override is not None and clip_override is not None and vae_override is not None:
model = model_override
clip = clip_override
vae = vae_override
elif model_override is not None:
raise Exception(f"[ERROR] clip or vae is missing")
elif vae_override is not None:
raise Exception(f"[ERROR] model or clip is missing")
elif clip_override is not None:
raise Exception(f"[ERROR] model or vae is missing")
else:
model, clip, vae = easyCache.load_checkpoint(ckpt_name, config_name)
if optional_lora_stack is not None:
for lora in optional_lora_stack:
if can_load_lora:
model, clip = easyCache.load_lora(lora[0], model, clip, lora[1], lora[2])
pipe_lora_stack.append({"lora_name": lora[0], "model": model, "clip": clip, "lora_model_strength": lora[1], "lora_clip_strength": lora[2]})
if lora_name != "None":
if can_load_lora:
model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength)
pipe_lora_stack.append({"lora_name": lora_name, "model": model, "clip": clip, "lora_model_strength": lora_model_strength,
"lora_clip_strength": lora_clip_strength})
# Check for custom VAE
if vae_name not in ["Baked VAE", "Baked-VAE"]:
vae = easyCache.load_vae(vae_name)
# CLIP skip
if not clip:
raise Exception("No CLIP found")
log_node_warn("正在处理提示词...")
positive_seed = find_wildcards_seed(positive, prompt) | model, clip, positive, positive_decode, show_positive_prompt, pipe_lora_stack = process_with_loras(positive, model, clip, "Positive", positive_seed, can_load_lora, pipe_lora_stack) | 7 | 2023-12-10 07:02:36+00:00 | 12k |
Open-All-Scale-Causal-Engine/OpenASCE | openasce/inference/graph_inference.py | [
{
"identifier": "CausalGraph",
"path": "openasce/discovery/causal_graph.py",
"snippet": "class CausalGraph(object):\n \"\"\"Causal Graph Class\n\n Represent the casual graph\n\n \"\"\"\n\n DEFAULT_COLUMN_NAME_PREFIX = \"x\"\n\n def __init__(self, names=[], bn=None, w: np.ndarray = None):\n \"\"\"Constructor\n\n Arguments:\n names: the node names\n bn: basic causal graph\n w: the connection matrix for causal graph\n\n \"\"\"\n self.para = None\n self.parents = {} # {c1:[p1, p2],c2:[p2,p3]....}\n self.names_to_index = {}\n self.index_to_names = {}\n self.n = 0\n self.index_exclude = []\n if bn is not None:\n self.copy(bn)\n else:\n if names:\n self.names_init(names)\n if w is not None:\n if self.names_to_index and self.index_to_names and self.parents:\n pass\n else:\n self.names_init(\n [\n self.DEFAULT_COLUMN_NAME_PREFIX + str(i)\n for i in range(w.shape[0])\n ]\n )\n nz = w.nonzero()\n for _ in map(lambda x: self.add_edge(x[0], x[1]), zip(nz[0], nz[1])):\n pass\n\n def names_init(self, names: List[str]) -> None:\n \"\"\"Initialize the graph with feature names\n\n initialize the names_to_index and index_to_names attributes\n initialize parents[i] = set() (no edges for the moment)\n\n Arguments:\n names (list of string): the names of the nodes\n\n Returns:\n None\n \"\"\"\n tmp_names = copy.deepcopy(names)\n self.names_to_index = {name: index for index, name in enumerate(names)}\n self.index_to_names = {index: name for index, name in enumerate(tmp_names)}\n self.n = len(self.names_to_index)\n for i in range(self.n):\n self.parents[i] = set()\n\n def parents_exclude(self, name_list: List[str]) -> None:\n for name in name_list:\n self.index_exclude.append(self.names_to_index[name])\n\n def random_init(self, max_parents: int = None) -> None:\n \"\"\"Add edges randomly\n\n For each node, pick a random number of the desired number of parents.\n Then, for each candidate, pick another random number. In average,\n the node will have the desired number of parents.\n\n Arguments:\n max_parents: maximal number of one node's parents\n \"\"\"\n max_parents = max_parents if max_parents else self.n - 1\n\n for i in range(self.n):\n nparents = np.random.randint(0, max_parents + 1)\n p = nparents / (self.n + 1.0)\n for j in range(self.n):\n if j != i and np.random.uniform() < p:\n self.add_edge(j, i)\n\n def merge(\n self, g1, g2, p1=1, p2=1, max_parents: int = None, mut_rate: float = 0.0\n ) -> None:\n \"\"\"Pick up edges from both g1 and g2 according to some random policy\n\n Arguments:\n g1 (CausalGraph)\n g1 (CausalGraph)\n p1 (float in [0,1]): proba of an edge in g1 being in self\n p2 (float in [0,1]): proba of an edge in g2 being in self\n p1 + p2 = 1\n max_parents (int)\n\n \"\"\"\n # merge randomly the two graphs\n self.random_merge(g1, g2, p1, p2)\n\n # introduce mutations\n self.mutate(mut_rate)\n\n # remove extra parents\n self.remove_extra_parents(max_parents)\n\n def random_merge(self, g1, g2, p1, p2) -> None:\n \"\"\"Creates graph from edges both in g1 and g2. 
Adds edges according to proba p1 and p2\n\n Arguments:\n g1 (CausalGraph)\n g1 (CausalGraph)\n p1 (float in [0,1]): proba of an edge in g1 being in self\n p2 (float in [0,1]): proba of an edge in g2 being in self\n \"\"\"\n for i, js in g1.parents.items():\n for j in js:\n if np.random.uniform() < p1 or p1 == 1:\n self.add_edge(j, i)\n for i, js in g2.parents.items():\n for j in js:\n if np.random.uniform() < p2 or p2 == 1:\n self.add_edge(j, i)\n\n def mutate(self, mut_rate: float = 0) -> None:\n \"\"\"Introduces new edges with a probability mut_rate\n\n Arguments:\n mut_rate (float in [0,1]): proba of mutation\n \"\"\"\n if mut_rate != 0:\n \"\"\"Do mutation like the following code snippet\n for i in range(self.n):\n for j in range(self.n):\n p = np.random.uniform()\n if p < mut_rate:\n if p < mut_rate / 2:\n self.add_edge(i, j)\n else:\n self.remove_edge(i, j)\n \"\"\"\n for _ in map(\n lambda x: self.add_edge(x[0], x[1])\n if x[2] < 0.25\n else self.remove_edge(x[0], x[1]),\n filter(\n lambda x: x[2] <= 0.5,\n map(\n lambda x: x + (np.random.uniform(),),\n itertools.product(self.n, self.n),\n ),\n ),\n ):\n pass\n\n def remove_extra_parents(self, max_parents: int = None) -> None:\n \"\"\"Removes extra edges if does not respect max parents constraint\n\n Arguments:\n max_parents: the maximal number of the node's parents\n \"\"\"\n if max_parents is not None:\n for i, js in self.parents.items():\n if len(js) > max_parents:\n indices = np.random.permutation(range(len(js)))\n for j in indices[0 : len(js) - max_parents]:\n self.remove_edge(j, i)\n\n def num_save(self, file_name: str) -> None:\n \"\"\"\n Saves the graph in number format\n\n Example\n parent1, child1\n parent2, child2\n\n Arguments:\n file_name: saved file path\n \"\"\"\n with open(file_name, \"w\") as f:\n for child_index, parents in self.parents.items():\n for parent_index in parents:\n f.write(f\"{parent_index},{child_index}\\n\")\n\n def save(self, file_path: str) -> None:\n \"\"\"Saves the graph in the desired format\n\n Example\n parent1, child1\n parent2, child2\n Arguments:\n file_path: saved file path\n \"\"\"\n with open(file_path, \"w\") as f:\n for child_index, parents in self.parents.items():\n for parent_index in parents:\n parent = self.index_to_names.get(parent_index)\n child = self.index_to_names.get(child_index)\n f.write(f\"{parent},{child}\\n\")\n\n def load(self, file_name: str) -> None:\n \"\"\"Loads structure from file. 
See save method\n\n Arguments:\n file_name: the path of the file to be loaded\n \"\"\"\n if not (self.names_to_index and self.index_to_names):\n name_set = set()\n # Go through the file to get all node names\n with open(file_name) as f:\n for line in f:\n line = line.strip().split(\",\")\n if len(line) == 2:\n p = line[0].replace(\"'\", \"\").replace('\"', \"\").strip()\n c = line[1].replace(\"'\", \"\").replace('\"', \"\").strip()\n if p not in name_set:\n name_set.add(p)\n if c not in name_set:\n name_set.add(c)\n self.names_to_index = {name: index for index, name in enumerate(name_set)}\n self.index_to_names = {index: name for index, name in enumerate(name_set)}\n with open(file_name) as f:\n for line in f:\n line = line.strip().split(\",\")\n if len(line) == 2:\n p = line[0].replace(\"'\", \"\").replace('\"', \"\").strip()\n c = line[1].replace(\"'\", \"\").replace('\"', \"\").strip()\n logger.info(f\"p={p}, c={c}\")\n p_index, c_index = self.names_to_index[p], self.names_to_index[c]\n self.add_edge(p_index, c_index)\n\n def is_cyclic(self) -> bool:\n \"\"\"Returns True if a cycle is found else False.\n\n Iterates over the nodes to find all the parents' parents, etc.\n A cycle is found if a node belongs to its own parent's set.\n\n \"\"\"\n all_parents = copy.deepcopy(self.parents)\n update = True\n while update:\n update = False\n for i in range(self.n):\n parents = list(all_parents[i])\n nparents = len(parents)\n for p in parents:\n all_parents[i].update(all_parents[p])\n if nparents != len(all_parents[i]):\n update = True\n if i in all_parents[i]:\n return True\n return False\n\n def copy(self, cg) -> None:\n \"\"\"Copies the structure of cg inside self and erases everything else\n\n Arguments:\n cg (CausalGraph): model\n \"\"\"\n self.index_to_names = copy.deepcopy(cg.index_to_names)\n self.names_to_index = copy.deepcopy(cg.names_to_index)\n self.n = cg.n\n self.parents = copy.deepcopy(cg.parents)\n\n def add_edge(\n self, parent: Union[int, str], child: Union[int, str], max_parents=None\n ) -> bool:\n \"\"\"Adds edge if respects max parents constraint and does not create a cycle\n\n Arguments:\n parent (int): id of parent\n child (int): id of child\n max_parents (int): None means no constraints\n\n Returns\n True if actually added the edge and False means no way to add the edge\n \"\"\"\n parent = self.names_to_index.get(parent) if isinstance(parent, str) else parent\n child = self.names_to_index.get(child) if isinstance(child, str) else child\n if (\n parent is None\n or child is None\n or parent >= self.n\n or child >= self.n\n or parent == child\n ):\n raise ValueError(f\"Error parent or child\")\n if max_parents is not None and len(self.parents[child]) >= max_parents:\n return False\n if child not in self.parents:\n self.parents[child] = set()\n self.parents[child].add(parent)\n if self.is_cyclic():\n logger.debug(\n f\"The edge from {parent} to {child} produces a cycle and be refused\"\n )\n self.remove_edge(parent, child)\n return False\n return True\n\n def remove_edge(self, parent: int, child: int, force: bool = True) -> None:\n try:\n self.parents[child].remove(parent)\n except Exception as e:\n if force:\n logger.debug(f\"Exception happens in remove edge: \\n{e}\")\n else:\n raise e\n\n def score(self, data: np.ndarray, rd: Dict[int, int] = None) -> float:\n \"\"\"Computes bayesian score of the structure given some data assuming uniform prior\n\n Example\n s = cg.score(data)\n\n Arguments:\n data: (nsamples, nfeatures)\n\n Returns\n s (float): bayesian score\n\n \"\"\"\n 
s = 0\n r = rd if rd else self.compute_r(data)\n for i in range(self.n):\n s += self.score_node(i, data, r)\n return s\n\n def compute_r(self, data: np.ndarray) -> dict:\n \"\"\"Compute the number of the value for each node\n\n Arguments:\n data (np array): (nsamples, nfeatures)\n Returns\n r (dict): r[i] = r_i\n \"\"\"\n r = {}\n for i in range(self.n):\n r[i] = np.unique(data[:, i]).shape[0]\n return r\n\n def score_node(self, i, data: np.ndarray, r) -> float:\n \"\"\"Compute the score of node i\n\n Arguments:\n i (int): node\n data (np array): (nsamples, nfeatures)\n r (dict of np array): r[i] = nb possible instances of i\n Returns\n s (float): contribution to log score of node i\n \"\"\"\n m, m0 = Counter(), Counter()\n columns = [i] + list(self.parents.get(i))\n extracted_data = data[:, columns]\n # counting nb of each instance of (node, parents) and (parents)\n for sample in extracted_data:\n m[tuple(sample)] += 1\n m0[tuple(sample[1:])] += 1\n # Adding contribution to the score (assuming uniform prior)\n s: float = 0.0\n \"\"\"Like following code snippet\n for c in m0.values():\n s -= gammaln(r[i] + c)\n s += gammaln(r[i])\n \"\"\"\n stat_i = r[i]\n s -= sum(gammaln(stat_i + c) for c in m0.values())\n s += gammaln(stat_i) * len(m0.values())\n \"\"\"Like following code snippet\n for c in m.values():\n s += gammaln(1 + c)\n \"\"\"\n s += sum(gammaln(1 + c) for c in m.values())\n return s\n\n def calculate_parameter(self, data: np.ndarray, rd: Dict[int, int] = None):\n \"\"\"Calculate the edge weight in the graph\n\n Arguments:\n data: samples\n rd: r[i] = r_i\n \"\"\"\n r = rd if rd else self.compute_r(data)\n node_param = {}\n aux_para_cp = {}\n for i in self.parents.keys():\n if i not in node_param:\n node_param[i] = {}\n if i not in aux_para_cp:\n aux_para_cp[i] = {}\n list_par = [i] + list(self.parents[i])\n data_par = data[:, list_par]\n all_count = 0\n column_list = [self.index_to_names[k] for k in list_par]\n for data_line in data_par:\n tup_k = tuple(data_line)\n if tup_k in aux_para_cp[i].keys():\n aux_para_cp[i][tup_k] += 1\n else:\n aux_para_cp[i][tup_k] = 1\n name = \"\"\n for k in range(len(list_par)):\n name += self.index_to_names[list_par[k]] + \" = {} \".format(\n data_line[k]\n )\n if name in node_param[i].keys():\n node_param[i][name] += 1\n else:\n node_param[i][name] = 1\n all_count += 1\n count = 1\n for k_s in r.keys():\n if k_s in list_par:\n count *= r[k_s]\n for tup_key in node_param[i].keys():\n node_param[i][tup_key] = (1 + node_param[i][tup_key]) / (\n count + all_count\n )\n df_res = []\n for tup_key in aux_para_cp[i].keys():\n aux_para_cp[i][tup_key] = (1 + aux_para_cp[i][tup_key]) / (\n count + all_count\n )\n list_tmp = list(tup_key)\n list_tmp.append(aux_para_cp[i][tup_key])\n df_res.append(list_tmp)\n column_list.append(GraphNodeForm.SCORE_COLUMN_NAME)\n p_ = GraphNodeForm(df_res, columns=column_list)\n node_param[i] = p_\n self.para = node_param\n return self.para"
},
{
"identifier": "Discovery",
"path": "openasce/discovery/discovery.py",
"snippet": "class Discovery(Runtime):\n \"\"\"Discovery Class\n\n Base class of the causal discovery\n\n Attributes:\n node_names (List[str]): the name of graph node, which should be set before fit\n\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._node_names = []\n\n def fit(self, *, X: Union[np.ndarray, Callable], **kwargs) -> None:\n \"\"\"Feed the sample data and search the causal relation on them\n\n Arguments:\n X: Features of the samples.\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")\n\n def get_result(self):\n \"\"\"Output the causal graph\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")\n\n @property\n def node_names(self):\n return self._node_names\n\n @node_names.setter\n def node_names(self, value: List[str]):\n self._node_names = value"
},
{
"identifier": "GraphNodeForm",
"path": "openasce/discovery/graph_node_form.py",
"snippet": "class GraphNodeForm(object):\n SCORE_COLUMN_NAME = \"node_score_value\"\n\n def __init__(self, input_data: List[List[float]], columns: List[str]) -> None:\n self._columns = copy.deepcopy(columns) # ['col1', 'col2']\n if GraphNodeForm.SCORE_COLUMN_NAME in columns:\n self._data = np.array(input_data, dtype=np.float64) # np.ndarray\n else:\n self._columns.append(GraphNodeForm.SCORE_COLUMN_NAME)\n self._data = np.array(input_data, dtype=np.float64) # np.ndarray\n self._data = np.column_stack((self._data, np.zeros(self._data.shape[0])))\n self._score_column_index = self._columns.index(GraphNodeForm.SCORE_COLUMN_NAME)\n\n @property\n def size(self):\n return len(self._data)\n\n @property\n def columns(self):\n return self._columns\n\n @property\n def data(self):\n return self._data\n\n @property\n def score_column_index(self):\n return self._score_column_index\n\n def index(self, key: str):\n return self._columns.index(key)\n\n def set_flag_zero(self, key: str, value_list: List[int]) -> None:\n \"\"\"set score column to 0 if the value of key column is not in input value_list\n\n Arguments:\n key: the column name\n value_list: the values need to be set\n Returns:\n None\n \"\"\"\n key_index = self._columns.index(key)\n score_column_index = self._score_column_index\n curr_data = self._data\n for i, row in enumerate(curr_data):\n if int(row[key_index]) not in value_list:\n curr_data[i, score_column_index] = 0\n\n def set_norm(self) -> None:\n \"\"\"normalize the value of score column\"\"\"\n score_column_index = self._score_column_index\n curr_data = self._data\n prob_sum = (\n curr_data[:, score_column_index].sum() + 0.00000001\n ) # avoid zero as divisor\n for row in curr_data:\n row[score_column_index] /= prob_sum\n\n def multiply_score_column(self, key: str, ext) -> None:\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n key: the column name\n ext (GraphNodeForm): another GraphNodeForm\n Returns:\n None\n \"\"\"\n key_index = self._columns.index(key)\n curr_data = self._data\n score_column_index = self._score_column_index\n external_key_index = ext._columns.index(key)\n external_data = ext._data\n ext_score_column_index = ext._score_column_index\n for row in curr_data:\n for ext_row in external_data:\n if row[key_index] == ext_row[external_key_index]:\n row[score_column_index] *= ext_row[ext_score_column_index]\n\n def sort_by_column(self, key: str) -> None:\n \"\"\"sort specified column\n\n Arguments:\n key: the column name\n\n Returns:\n None\n \"\"\"\n key_index = self._columns.index(key)\n curr_data = self._data\n self._data = np.array(sorted(curr_data, key=lambda x: x[key_index]))\n\n def get_score_deviation(self, addition):\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n addition: Another GraphNodeForm used to calculate the deviation\n Returns:\n Calculation result\n \"\"\"\n curr_data = self._data\n score_column_index = self._score_column_index\n external_data = addition.data\n ext_score_column_index = addition._score_column_index\n t = np.abs(\n curr_data[:, score_column_index : score_column_index + 1]\n - external_data[:, ext_score_column_index : ext_score_column_index + 1]\n )\n return t.sum()\n\n def get_score_value(self, target_key: str, target_value: int):\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n target_key: the column name\n target_value: the column value\n\n Returns:\n\n \"\"\"\n key_index = 
self._columns.index(target_key)\n curr_data = self._data\n score_column_index = self._score_column_index\n for row in curr_data:\n if int(row[key_index]) == target_value:\n return row[score_column_index]\n raise ValueError(f\"Not target value exists\")\n\n def set_groupby_sum(self, key: str):\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n key: the column name\n\n Returns:\n\n \"\"\"\n key_index = self._columns.index(key)\n curr_data = self._data\n score_column_index = self._score_column_index\n ac = {}\n for row in curr_data:\n if int(row[key_index]) in ac:\n ac[int(row[key_index])] += row[score_column_index]\n else:\n ac[int(row[key_index])] = row[score_column_index]\n result_data = np.zeros(shape=(len(ac), 2), dtype=np.float64)\n line_num = 0\n for k1, value in ac.items():\n result_data[line_num] = np.array([k1, value], dtype=np.float64)\n line_num += 1\n self._data = result_data\n self._columns = [key, GraphNodeForm.SCORE_COLUMN_NAME]\n self._score_column_index = self._columns.index(GraphNodeForm.SCORE_COLUMN_NAME)\n\n def __str__(self):\n np.set_printoptions(threshold=5000, suppress=True)\n return self.columns.__str__() + \"\\n\" + self._data.__str__() + \"\\n\""
},
{
"identifier": "InferenceModel",
"path": "openasce/inference/inference_model.py",
"snippet": "class InferenceModel(Runtime):\n \"\"\"Inference Class\n\n Base class of the causal inference\n\n Attributes:\n\n \"\"\"\n\n CONDITION_DICT_NAME = \"condition\"\n TREATMENT_VALUE = \"treatment_value\"\n LABEL_VALUE = \"label_value\"\n\n def __init__(self) -> None:\n super().__init__()\n\n @property\n def data(self):\n \"\"\"Return the sample data\"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")\n\n def fit(\n self,\n *,\n X: Iterable[np.ndarray],\n Y: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"Feed the sample data and train the model used to effect on the samples.\n\n Arguments:\n X: Features of the samples.\n Y: Outcomes of the samples.\n T: Treatments of the samples.\n\n Returns:\n None\n \"\"\"\n pass\n\n def estimate(\n self,\n *,\n X: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"Feed the sample data and estimate the effect on the samples\n\n Arguments:\n X: Features of the samples.\n T: Treatments of the samples.\n\n Returns:\n None\n \"\"\"\n pass\n\n def get_result(self) -> Any:\n \"\"\"Get the estimated result\n\n The sub-class should implement this routine and runtime invokes it.\n\n Returns:\n The estimation result.\n \"\"\"\n return self._estimate_result\n\n def output(self, output_path: str) -> None:\n \"\"\"Output the estimated result to files\n\n The sub-class should implement this routine and runtime invokes it.\n\n Arguments:\n output_path: The path of output file.\n\n Returns:\n None\n \"\"\"\n from numpy import savetxt\n\n savetxt(output_path, self.get_result())\n logger.info(f\"Write result to file: {output_path}\")\n\n def _wrap_fit(m):\n @wraps(m)\n def call(self, *, X, Y, T, **kwargs):\n self._prefit(Y, T, X=X, **kwargs)\n # call the wrapped fit method\n m(self, X=X, Y=Y, T=T, **kwargs)\n self._postfit(Y, T, X=X, **kwargs)\n return self\n\n return call"
},
{
"identifier": "logger",
"path": "openasce/utils/logger.py",
"snippet": "GLOBAL_LOGGER_NAME = \"openasce-log\"\nDEFAULT_FORMAT = (\n \"[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d:%(funcName)s] %(message)s\"\n)\nDEFAULT_FORMATTER = logging.Formatter(DEFAULT_FORMAT)\ndef init_custom_logger(name):\nclass openasceLogger(object):"
}
] | import copy
import numpy as np
from functools import reduce
from typing import Dict, Iterable, List
from openasce.discovery.causal_graph import CausalGraph
from openasce.discovery.discovery import Discovery
from openasce.discovery.graph_node_form import GraphNodeForm
from openasce.inference.inference_model import InferenceModel
from openasce.utils.logger import logger | 7,404 |
class GraphInferModel(InferenceModel):
"""The inference using the causal graph
Attributes:
graph: The causal graph. If not set, the class will try to find it out if discovery is available.
column_names: all names of sample
treatment_name: treatment column name in column_names
label_name: target column name in column_names
"""
def __init__(
self,
*,
graph: CausalGraph = None,
column_names: List[str] = None,
treatment_name: str = None,
label_name: str = None,
num_iteration=20,
) -> None:
"""
Arguments:
graph: causal graph
column_names: all names of column
treatment_name: the name of treatment column
label_name: the name of target name
"""
super().__init__()
self._graph = graph
self._column_names = column_names
self._treatment_name = treatment_name
self._label_name = label_name
self._discovery = None
self._data = None
self._num_iteration = num_iteration
self._label_value = None
@property
def data(self):
assert self._data is not None, f"Must have sample data."
return self._data
@property
def graph(self):
assert self._graph is not None, "The graph object should be set"
return self._graph
@graph.setter
def graph(self, value):
assert self._graph is None, "The graph object should be set once only"
self._graph = value
# graph is available, set the column names using graph columns
self.column_names = list(self.graph.names_to_index.keys())
@property
def column_names(self):
"""All nodes' name.
Note: should include the treatment node and label node.
"""
assert self._column_names is not None, "The column names should be set"
return self._column_names
@column_names.setter
def column_names(self, value: List[str]):
assert self._column_names is None, "The column names should be set once only"
self._column_names = value
@property
def treatment_name(self):
assert self._treatment_name is not None, "The treatment name should be set"
return self._treatment_name
@treatment_name.setter
def treatment_name(self, value: str):
assert (
self._treatment_name is None
), "The treatment name should be set once only"
self._treatment_name = value
@property
def label_name(self):
assert self._label_name is not None, "The label name should be set"
return self._label_name
@label_name.setter
def label_name(self, value: str):
assert self._label_name is None, "The label name should be set once only"
self._label_name = value
@property
def discovery(self) -> Discovery:
assert self._discovery is not None, "The discovery object should be set"
return self._discovery
@discovery.setter
def discovery(self, value: Discovery):
self._discovery = value
def fit(
self,
*,
X: Iterable[np.ndarray],
Y: Iterable[np.ndarray] = None,
T: Iterable[np.ndarray] = None,
**kwargs,
) -> None:
"""Feed the sample data to train the graph
Arguments:
X: All features of the samples including the treatment and the label node.
Y: Ignored in causal graph inference.
T: Ignored in causal graph inference.
Returns:
"""
if Y is not None or T is not None:
| # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class GraphInferModel(InferenceModel):
"""The inference using the causal graph
Attributes:
graph: The causal graph. If not set, the class will try to discover it when a discovery object is available.
column_names: all column names of the samples
treatment_name: the treatment column name in column_names
label_name: the target (label) column name in column_names
"""
def __init__(
self,
*,
graph: CausalGraph = None,
column_names: List[str] = None,
treatment_name: str = None,
label_name: str = None,
num_iteration=20,
) -> None:
"""
Arguments:
graph: the causal graph
column_names: all column names
treatment_name: the name of the treatment column
label_name: the name of the target (label) column
"""
super().__init__()
self._graph = graph
self._column_names = column_names
self._treatment_name = treatment_name
self._label_name = label_name
self._discovery = None
self._data = None
self._num_iteration = num_iteration
self._label_value = None
@property
def data(self):
assert self._data is not None, "Must have sample data."
return self._data
@property
def graph(self):
assert self._graph is not None, "The graph object should be set"
return self._graph
@graph.setter
def graph(self, value):
assert self._graph is None, "The graph object should be set once only"
self._graph = value
# graph is available, set the column names using graph columns
self.column_names = list(self.graph.names_to_index.keys())
@property
def column_names(self):
"""All nodes' name.
Note: these should include the treatment node and the label node.
"""
assert self._column_names is not None, "The column names should be set"
return self._column_names
@column_names.setter
def column_names(self, value: List[str]):
assert self._column_names is None, "The column names should be set once only"
self._column_names = value
@property
def treatment_name(self):
assert self._treatment_name is not None, "The treatment name should be set"
return self._treatment_name
@treatment_name.setter
def treatment_name(self, value: str):
assert (
self._treatment_name is None
), "The treatment name should be set once only"
self._treatment_name = value
@property
def label_name(self):
assert self._label_name is not None, "The label name should be set"
return self._label_name
@label_name.setter
def label_name(self, value: str):
assert self._label_name is None, "The label name should be set once only"
self._label_name = value
@property
def discovery(self) -> Discovery:
assert self._discovery is not None, "The discovery object should be set"
return self._discovery
@discovery.setter
def discovery(self, value: Discovery):
self._discovery = value
def fit(
self,
*,
X: Iterable[np.ndarray],
Y: Iterable[np.ndarray] = None,
T: Iterable[np.ndarray] = None,
**kwargs,
) -> None:
"""Feed the sample data to train the graph
Arguments:
X: All features of the samples, including the treatment and label nodes.
Y: Ignored in causal graph inference.
T: Ignored in causal graph inference.
Returns:
"""
if Y is not None or T is not None: | logger.info( | 4 | 2023-12-06 05:54:36+00:00 | 12k |
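To make the GraphInferModel record above easier to follow, the short driver below sketches how the class is meant to be wired up: column names plus the treatment and label names go in at construction time, a graph or a discovery object supplies the structure, and every column is fed through X. This is a hypothetical illustration, not code from the openasce repository: the column layout, the random sample matrix, and the assumption that either gim.graph or gim.discovery has been populated before fit() are invented for the sketch, which presumes GraphInferModel and numpy (as np) are already in scope as in the module above.
# Hypothetical usage sketch for the GraphInferModel defined above (illustration only,
# not taken from the openasce repository; column names and samples are invented).
if __name__ == "__main__":
    columns = ["age", "income", "treatment", "outcome"]  # hypothetical node names
    rng = np.random.default_rng(0)
    # Discrete samples, one column per node; treatment and label ride along inside X.
    samples = rng.integers(0, 2, size=(1000, len(columns)))

    gim = GraphInferModel(
        column_names=columns,
        treatment_name="treatment",
        label_name="outcome",
    )
    # Either assign a pre-built CausalGraph to gim.graph or attach a Discovery
    # implementation to gim.discovery so that fit() can recover the structure itself.
    gim.fit(X=samples)  # Y and T are intentionally omitted; fit() ignores them here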
eclipse-t2i/eclipse-inference | main.py | [
{
"identifier": "PriorTransformer",
"path": "src/priors/prior_transformer.py",
"snippet": "class PriorTransformer(ModelMixin, ConfigMixin):\n \"\"\"\n A Prior Transformer model.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 32): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.\n num_layers (`int`, *optional*, defaults to 20): The number of layers of Transformer blocks to use.\n embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `hidden_states`\n num_embeddings (`int`, *optional*, defaults to 77):\n The number of embeddings of the model input `hidden_states`\n additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the\n projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +\n additional_embeddings`.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n time_embed_act_fn (`str`, *optional*, defaults to 'silu'):\n The activation function to use to create timestep embeddings.\n norm_in_type (`str`, *optional*, defaults to None): The normalization layer to apply on hidden states before\n passing to Transformer blocks. Set it to `None` if normalization is not needed.\n embedding_proj_norm_type (`str`, *optional*, defaults to None):\n The normalization layer to apply on the input `proj_embedding`. Set it to `None` if normalization is not\n needed.\n encoder_hid_proj_type (`str`, *optional*, defaults to `linear`):\n The projection layer to apply on the input `encoder_hidden_states`. Set it to `None` if\n `encoder_hidden_states` is `None`.\n added_emb_type (`str`, *optional*, defaults to `prd`): Additional embeddings to condition the model.\n Choose from `prd` or `None`. if choose `prd`, it will prepend a token indicating the (quantized) dot\n product between the text embedding and image embedding as proposed in the unclip paper\n https://arxiv.org/abs/2204.06125 If it is `None`, no additional embeddings will be prepended.\n time_embed_dim (`int, *optional*, defaults to None): The dimension of timestep embeddings.\n If None, will be set to `num_attention_heads * attention_head_dim`\n embedding_proj_dim (`int`, *optional*, default to None):\n The dimension of `proj_embedding`. If None, will be set to `embedding_dim`.\n clip_embed_dim (`int`, *optional*, default to None):\n The dimension of the output. 
If None, will be set to `embedding_dim`.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 32,\n attention_head_dim: int = 64,\n num_layers: int = 20,\n embedding_dim: int = 768,\n num_embeddings=77,\n additional_embeddings=3, # as we have remvoed the time embedding\n dropout: float = 0.0,\n # time_embed_act_fn: str = \"silu\",\n norm_in_type: Optional[str] = None, # layer\n embedding_proj_norm_type: Optional[str] = None, # layer\n encoder_hid_proj_type: Optional[str] = \"linear\", # linear\n added_emb_type: Optional[str] = \"prd\", # prd\n # time_embed_dim: Optional[int] = None,\n embedding_proj_dim: Optional[int] = None,\n clip_embed_dim: Optional[int] = None,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n self.additional_embeddings = additional_embeddings\n\n # time_embed_dim = time_embed_dim or inner_dim\n embedding_proj_dim = embedding_proj_dim or embedding_dim\n clip_embed_dim = clip_embed_dim or embedding_dim\n\n # self.time_proj = Timesteps(inner_dim, True, 0)\n # self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)\n\n self.proj_in = nn.Linear(embedding_dim, inner_dim)\n\n if embedding_proj_norm_type is None:\n self.embedding_proj_norm = None\n elif embedding_proj_norm_type == \"layer\":\n self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)\n else:\n raise ValueError(f\"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}\")\n\n self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)\n\n if encoder_hid_proj_type is None:\n self.encoder_hidden_states_proj = None\n elif encoder_hid_proj_type == \"linear\":\n self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)\n else:\n raise ValueError(f\"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}\")\n\n self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))\n\n if added_emb_type == \"prd\":\n self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))\n elif added_emb_type is None:\n self.prd_embedding = None\n else:\n raise ValueError(\n f\"`added_emb_type`: {added_emb_type} is not supported. 
Make sure to choose one of `'prd'` or `None`.\"\n )\n\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n activation_fn=\"gelu\",\n attention_bias=True,\n )\n for d in range(num_layers)\n ]\n )\n\n if norm_in_type == \"layer\":\n self.norm_in = nn.LayerNorm(inner_dim)\n elif norm_in_type is None:\n self.norm_in = None\n else:\n raise ValueError(f\"Unsupported norm_in_type: {norm_in_type}.\")\n\n self.norm_out = nn.LayerNorm(inner_dim)\n\n self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)\n\n causal_attention_mask = torch.full(\n [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0\n )\n causal_attention_mask.triu_(1)\n causal_attention_mask = causal_attention_mask[None, ...]\n self.register_buffer(\"causal_attention_mask\", causal_attention_mask, persistent=False)\n\n self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))\n self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))\n\n @property\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n def attn_processors(self) -> Dict[str, AttentionProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n if hasattr(module, \"set_processor\"):\n processors[f\"{name}.processor\"] = module.processor\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n r\"\"\"\n Sets the attention processor to use to compute attention.\n\n Parameters:\n processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n for **all** `Attention` layers.\n\n If `processor` is a dict, the key needs to define the path to the corresponding cross attention\n processor. This is strongly recommended when setting trainable attention processors.\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. 
Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n module.set_processor(processor)\n else:\n module.set_processor(processor.pop(f\"{name}.processor\"))\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n def set_default_attn_processor(self):\n \"\"\"\n Disables custom attention processors and sets the default attention implementation.\n \"\"\"\n self.set_attn_processor(AttnProcessor())\n\n def forward(\n self,\n hidden_states,\n # timestep: Union[torch.Tensor, float, int],\n proj_embedding: torch.FloatTensor,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.BoolTensor] = None,\n return_dict: bool = True,\n ):\n \"\"\"\n The [`PriorTransformer`] forward method.\n\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):\n The currently predicted image embeddings.\n timestep (`torch.LongTensor`):\n Current denoising step.\n proj_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):\n Projected embedding vector the denoising process is conditioned on.\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_embeddings, embedding_dim)`):\n Hidden states of the text embeddings the denoising process is conditioned on.\n attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`):\n Text mask for the text embeddings.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.prior_transformer.PriorTransformerOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.prior_transformer.PriorTransformerOutput`] or `tuple`:\n If return_dict is True, a [`~models.prior_transformer.PriorTransformerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n \"\"\"\n batch_size = hidden_states.shape[0]\n\n # timesteps = timestep\n # if not torch.is_tensor(timesteps):\n # timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)\n # elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:\n # timesteps = timesteps[None].to(hidden_states.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n # timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)\n\n # timesteps_projected = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might be fp16, so we need to cast here.\n # timesteps_projected = timesteps_projected.to(dtype=self.dtype)\n # time_embeddings = self.time_embedding(timesteps_projected)\n\n if self.embedding_proj_norm is not None:\n proj_embedding = self.embedding_proj_norm(proj_embedding)\n\n proj_embeddings = self.embedding_proj(proj_embedding)\n if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:\n encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)\n elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:\n raise ValueError(\"`encoder_hidden_states_proj` requires `encoder_hidden_states` to 
be set\")\n\n hidden_states = self.proj_in(hidden_states)\n\n positional_embeddings = self.positional_embedding.to(hidden_states.dtype)\n\n additional_embeds = []\n additional_embeddings_len = 0\n\n if encoder_hidden_states is not None:\n additional_embeds.append(encoder_hidden_states)\n additional_embeddings_len += encoder_hidden_states.shape[1]\n\n if len(proj_embeddings.shape) == 2:\n proj_embeddings = proj_embeddings[:, None, :]\n\n if len(hidden_states.shape) == 2:\n hidden_states = hidden_states[:, None, :]\n\n additional_embeds = additional_embeds + [\n proj_embeddings,\n # time_embeddings[:, None, :],\n hidden_states,\n ]\n\n if self.prd_embedding is not None:\n prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)\n additional_embeds.append(prd_embedding)\n\n hidden_states = torch.cat(\n additional_embeds,\n dim=1,\n )\n\n # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens\n additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1\n if positional_embeddings.shape[1] < hidden_states.shape[1]:\n positional_embeddings = F.pad(\n positional_embeddings,\n (\n 0,\n 0,\n additional_embeddings_len,\n self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,\n ),\n value=0.0,\n )\n\n hidden_states = hidden_states + positional_embeddings\n\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0\n attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)\n attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)\n attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)\n\n if self.norm_in is not None:\n hidden_states = self.norm_in(hidden_states)\n\n for block in self.transformer_blocks:\n hidden_states = block(hidden_states, attention_mask=attention_mask)\n\n hidden_states = self.norm_out(hidden_states)\n\n if self.prd_embedding is not None:\n hidden_states = hidden_states[:, -1]\n else:\n hidden_states = hidden_states[:, additional_embeddings_len:]\n\n predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)\n\n if not return_dict:\n return (predicted_image_embedding,)\n\n return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)\n\n def post_process_latents(self, prior_latents):\n prior_latents = (prior_latents * self.clip_std) + self.clip_mean\n return prior_latents"
},
{
"identifier": "KandinskyPriorPipeline",
"path": "src/pipelines/pipeline_kandinsky_prior.py",
"snippet": "class KandinskyPriorPipeline(DiffusionPipeline):\n \"\"\"\n Pipeline for generating image prior for Kandinsky\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n prior ([`PriorTransformer`]):\n The canonincal unCLIP prior to approximate the image embedding from the text embedding.\n image_encoder ([`CLIPVisionModelWithProjection`]):\n Frozen image-encoder.\n text_encoder ([`CLIPTextModelWithProjection`]):\n Frozen text-encoder.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n scheduler ([`UnCLIPScheduler`]):\n A scheduler to be used in combination with `prior` to generate image embedding.\n \"\"\"\n\n _exclude_from_cpu_offload = [\"prior\"]\n\n def __init__(\n self,\n prior: PriorTransformer,\n image_encoder: CLIPVisionModelWithProjection,\n text_encoder: CLIPTextModelWithProjection,\n tokenizer: CLIPTokenizer,\n scheduler: UnCLIPScheduler,\n image_processor: CLIPImageProcessor,\n ):\n super().__init__()\n\n self.register_modules(\n prior=prior,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n scheduler=scheduler,\n image_encoder=image_encoder,\n image_processor=image_processor,\n )\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING)\n def interpolate(\n self,\n images_and_prompts: List[Union[str, PIL.Image.Image, torch.FloatTensor]],\n weights: List[float],\n num_images_per_prompt: int = 1,\n num_inference_steps: int = 25,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n negative_prior_prompt: Optional[str] = None,\n negative_prompt: str = \"\",\n guidance_scale: float = 4.0,\n device=None,\n ):\n \"\"\"\n Function invoked when using the prior pipeline for interpolation.\n\n Args:\n images_and_prompts (`List[Union[str, PIL.Image.Image, torch.FloatTensor]]`):\n list of prompts and images to guide the image generation.\n weights: (`List[float]`):\n list of weights for each condition in `images_and_prompts`\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n num_inference_steps (`int`, *optional*, defaults to 25):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n negative_prior_prompt (`str`, *optional*):\n The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if\n `guidance_scale` is less than `1`).\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt not to guide the image generation. 
Ignored when not using guidance (i.e., ignored if\n `guidance_scale` is less than `1`).\n guidance_scale (`float`, *optional*, defaults to 4.0):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n\n Examples:\n\n Returns:\n [`KandinskyPriorPipelineOutput`] or `tuple`\n \"\"\"\n\n device = device or self.device\n\n if len(images_and_prompts) != len(weights):\n raise ValueError(\n f\"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length\"\n )\n\n image_embeddings = []\n for cond, weight in zip(images_and_prompts, weights):\n if isinstance(cond, str):\n image_emb = self(\n cond,\n num_inference_steps=num_inference_steps,\n num_images_per_prompt=num_images_per_prompt,\n generator=generator,\n latents=latents,\n negative_prompt=negative_prior_prompt,\n guidance_scale=guidance_scale,\n ).image_embeds\n\n elif isinstance(cond, (PIL.Image.Image, torch.Tensor)):\n if isinstance(cond, PIL.Image.Image):\n cond = (\n self.image_processor(cond, return_tensors=\"pt\")\n .pixel_values[0]\n .unsqueeze(0)\n .to(dtype=self.image_encoder.dtype, device=device)\n )\n\n image_emb = self.image_encoder(cond)[\"image_embeds\"]\n\n else:\n raise ValueError(\n f\"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}\"\n )\n\n image_embeddings.append(image_emb * weight)\n\n image_emb = torch.cat(image_embeddings).sum(dim=0, keepdim=True)\n\n out_zero = self(\n negative_prompt,\n num_inference_steps=num_inference_steps,\n num_images_per_prompt=num_images_per_prompt,\n generator=generator,\n latents=latents,\n negative_prompt=negative_prior_prompt,\n guidance_scale=guidance_scale,\n )\n zero_image_emb = (\n out_zero.negative_image_embeds\n if negative_prompt == \"\"\n else out_zero.image_embeds\n )\n\n return KandinskyPriorPipelineOutput(\n image_embeds=image_emb, negative_image_embeds=zero_image_emb\n )\n\n # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents\n def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):\n if latents is None:\n latents = randn_tensor(\n shape, generator=generator, device=device, dtype=dtype\n )\n else:\n if latents.shape != shape:\n raise ValueError(\n f\"Unexpected latents shape, got {latents.shape}, expected {shape}\"\n )\n latents = latents.to(device)\n\n latents = latents * scheduler.init_noise_sigma\n return latents\n\n def get_zero_embed(self, batch_size=1, device=None):\n device = device or self.device\n zero_img = torch.zeros(\n 1,\n 3,\n self.image_encoder.config.image_size,\n self.image_encoder.config.image_size,\n ).to(device=device, dtype=self.image_encoder.dtype)\n zero_image_emb = self.image_encoder(zero_img)[\"image_embeds\"]\n zero_image_emb = zero_image_emb.repeat(batch_size, 1)\n return zero_image_emb\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n ):\n batch_size = len(prompt) if isinstance(prompt, list) else 1\n # get prompt text embeddings\n text_inputs = self.tokenizer(\n prompt,\n 
padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n text_mask = text_inputs.attention_mask.bool().to(device)\n\n untruncated_ids = self.tokenizer(\n prompt, padding=\"longest\", return_tensors=\"pt\"\n ).input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]\n\n text_encoder_output = self.text_encoder(text_input_ids.to(device))\n\n prompt_embeds = text_encoder_output.text_embeds\n text_encoder_hidden_states = text_encoder_output.last_hidden_state\n\n prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)\n text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(\n num_images_per_prompt, dim=0\n )\n text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)\n\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n uncond_text_mask = uncond_input.attention_mask.bool().to(device)\n negative_prompt_embeds_text_encoder_output = self.text_encoder(\n uncond_input.input_ids.to(device)\n )\n\n negative_prompt_embeds = (\n negative_prompt_embeds_text_encoder_output.text_embeds\n )\n uncond_text_encoder_hidden_states = (\n negative_prompt_embeds_text_encoder_output.last_hidden_state\n )\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n\n seq_len = negative_prompt_embeds.shape[1]\n negative_prompt_embeds = negative_prompt_embeds.repeat(\n 1, num_images_per_prompt\n )\n negative_prompt_embeds = negative_prompt_embeds.view(\n batch_size * num_images_per_prompt, seq_len\n )\n\n seq_len = uncond_text_encoder_hidden_states.shape[1]\n uncond_text_encoder_hidden_states = (\n uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)\n )\n uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(\n batch_size * num_images_per_prompt, seq_len, -1\n )\n uncond_text_mask = uncond_text_mask.repeat_interleave(\n num_images_per_prompt, dim=0\n )\n\n # done duplicates\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n text_encoder_hidden_states = torch.cat(\n [uncond_text_encoder_hidden_states, text_encoder_hidden_states]\n )\n\n text_mask = torch.cat([uncond_text_mask, text_mask])\n\n return prompt_embeds, text_encoder_hidden_states, text_mask\n\n def enable_model_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. 
Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\n \"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\"\n )\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.prior]:\n _, hook = cpu_offload_with_hook(\n cpu_offloaded_model, device, prev_module_hook=hook\n )\n\n # We'll offload the last model manually.\n self.prior_hook = hook\n\n _, hook = cpu_offload_with_hook(\n self.image_encoder, device, prev_module_hook=self.prior_hook\n )\n\n self.final_offload_hook = hook\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]],\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: int = 1,\n num_inference_steps: int = 25,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n guidance_scale: float = 4.0,\n output_type: Optional[str] = \"pt\",\n return_dict: bool = True,\n ):\n \"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n num_inference_steps (`int`, *optional*, defaults to 25):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n guidance_scale (`float`, *optional*, defaults to 4.0):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n output_type (`str`, *optional*, defaults to `\"pt\"`):\n The output format of the generate image. 
Choose between: `\"np\"` (`np.array`) or `\"pt\"`\n (`torch.Tensor`).\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.\n\n Examples:\n\n Returns:\n [`KandinskyPriorPipelineOutput`] or `tuple`\n \"\"\"\n\n if isinstance(prompt, str):\n prompt = [prompt]\n elif not isinstance(prompt, list):\n raise ValueError(\n f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\"\n )\n\n if isinstance(negative_prompt, str):\n negative_prompt = [negative_prompt]\n elif not isinstance(negative_prompt, list) and negative_prompt is not None:\n raise ValueError(\n f\"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}\"\n )\n\n # if the negative prompt is defined we double the batch size to\n # directly retrieve the negative prompt embedding\n if negative_prompt is not None:\n prompt = prompt + negative_prompt\n negative_prompt = 2 * negative_prompt\n\n device = self._execution_device\n\n batch_size = len(prompt)\n batch_size = batch_size * num_images_per_prompt\n\n prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt(\n prompt, device, num_images_per_prompt, False, negative_prompt\n )\n\n hidden_states = randn_tensor(\n (batch_size, prompt_embeds.shape[-1]),\n device=prompt_embeds.device,\n dtype=prompt_embeds.dtype,\n generator=generator,\n )\n\n latents = self.prior(\n hidden_states,\n proj_embedding=prompt_embeds,\n encoder_hidden_states=text_encoder_hidden_states,\n attention_mask=text_mask,\n ).predicted_image_embedding\n\n image_embeddings = latents\n\n # if negative prompt has been defined, we retrieve split the image embedding into two\n if negative_prompt is None:\n zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device)\n\n if (\n hasattr(self, \"final_offload_hook\")\n and self.final_offload_hook is not None\n ):\n self.final_offload_hook.offload()\n else:\n image_embeddings, zero_embeds = image_embeddings.chunk(2)\n\n if (\n hasattr(self, \"final_offload_hook\")\n and self.final_offload_hook is not None\n ):\n self.prior_hook.offload()\n\n if output_type not in [\"pt\", \"np\"]:\n raise ValueError(\n f\"Only the output types `pt` and `np` are supported not output_type={output_type}\"\n )\n\n if output_type == \"np\":\n image_embeddings = image_embeddings.cpu().numpy()\n zero_embeds = zero_embeds.cpu().numpy()\n\n if not return_dict:\n return (image_embeddings, zero_embeds)\n\n return KandinskyPriorPipelineOutput(\n image_embeds=image_embeddings, negative_image_embeds=zero_embeds\n )"
}
] | import gradio as gr
import torch
import math
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from transformers import (
CLIPProcessor,
CLIPModel,
CLIPTokenizer,
CLIPTextModelWithProjection,
CLIPVisionModelWithProjection,
CLIPFeatureExtractor,
)
from typing import List
from PIL import Image, ImageChops
from diffusers import UnCLIPPipeline
from transformers import CLIPTokenizer
from src.priors.prior_transformer import (
PriorTransformer,
) # original huggingface prior transformer without time conditioning
from src.pipelines.pipeline_kandinsky_prior import KandinskyPriorPipeline
from diffusers import DiffusionPipeline | 8,689 |
# from diffusers.utils.torch_utils import randn_tensor
__DEVICE__ = "cpu"
if torch.cuda.is_available():
__DEVICE__ = "cuda"
class Ours:
def __init__(self, device):
text_encoder = (
CLIPTextModelWithProjection.from_pretrained(
"laion/CLIP-ViT-bigG-14-laion2B-39B-b160k",
projection_dim=1280,
torch_dtype=torch.float16,
)
.eval()
.requires_grad_(False)
)
tokenizer = CLIPTokenizer.from_pretrained(
"laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
)
prior = PriorTransformer.from_pretrained(
"ECLIPSE-Community/ECLIPSE_KandinskyV22_Prior",
torch_dtype=torch.float16,
)
|
# from diffusers.utils.torch_utils import randn_tensor
__DEVICE__ = "cpu"
if torch.cuda.is_available():
__DEVICE__ = "cuda"
class Ours:
def __init__(self, device):
text_encoder = (
CLIPTextModelWithProjection.from_pretrained(
"laion/CLIP-ViT-bigG-14-laion2B-39B-b160k",
projection_dim=1280,
torch_dtype=torch.float16,
)
.eval()
.requires_grad_(False)
)
tokenizer = CLIPTokenizer.from_pretrained(
"laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
)
prior = PriorTransformer.from_pretrained(
"ECLIPSE-Community/ECLIPSE_KandinskyV22_Prior",
torch_dtype=torch.float16,
)
| self.pipe_prior = KandinskyPriorPipeline.from_pretrained( | 1 | 2023-12-07 05:17:08+00:00 | 12k |
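As a reading aid for the eclipse-inference record above, the sketch below shows one plausible way the construction might continue: the ECLIPSE prior, the bigG text encoder, and the tokenizer are handed to KandinskyPriorPipeline.from_pretrained as component overrides, and a Kandinsky 2.2 decoder pipeline is loaded alongside it for the second stage. This is a hedged sketch under stated assumptions, not the repository's actual code: the hub ids for the base prior and decoder checkpoints are assumptions, and build_pipelines is a hypothetical helper introduced only so the snippet stands on its own.
# Sketch only (assumptions, not the repository's code): wire the ECLIPSE prior into a
# Kandinsky 2.2 prior pipeline and pair it with a decoder pipeline for the second stage.
def build_pipelines(prior, text_encoder, tokenizer, device):  # hypothetical helper
    pipe_prior = KandinskyPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior",  # assumed base prior checkpoint
        prior=prior,  # ECLIPSE prior loaded above
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        torch_dtype=torch.float16,
    ).to(device)
    pipe = DiffusionPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder",  # assumed decoder checkpoint
        torch_dtype=torch.float16,
    ).to(device)
    return pipe_prior, pipe

# Two-stage generation with the sketched pipelines would then look roughly like:
#   emb = pipe_prior("a cat wearing a spacesuit")
#   image = pipe(image_embeds=emb.image_embeds,
#                negative_image_embeds=emb.negative_image_embeds).images[0]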
AIFSH/NativeDancer | nativedancer/third_part/detectron2/modeling/mmdet_wrapper.py | [
{
"identifier": "ShapeSpec",
"path": "nativedancer/third_part/detectron2/layers/shape_spec.py",
"snippet": "class ShapeSpec:\n \"\"\"\n A simple structure that contains basic shape specification about a tensor.\n It is often used as the auxiliary inputs/outputs of models,\n to complement the lack of shape inference ability among pytorch modules.\n \"\"\"\n\n channels: Optional[int] = None\n height: Optional[int] = None\n width: Optional[int] = None\n stride: Optional[int] = None"
},
{
"identifier": "Boxes",
"path": "nativedancer/third_part/detectron2/structures/boxes.py",
"snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\n else:\n tensor = tensor.to(torch.float32)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor"
},
{
"identifier": "ImageList",
"path": "nativedancer/third_part/detectron2/structures/image_list.py",
"snippet": "class ImageList:\n \"\"\"\n Structure that holds a list of images (of possibly\n varying sizes) as a single tensor.\n This works by padding the images to the same size.\n The original sizes of each image is stored in `image_sizes`.\n\n Attributes:\n image_sizes (list[tuple[int, int]]): each tuple is (h, w).\n During tracing, it becomes list[Tensor] instead.\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):\n \"\"\"\n Arguments:\n tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1\n image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can\n be smaller than (H, W) due to padding.\n \"\"\"\n self.tensor = tensor\n self.image_sizes = image_sizes\n\n def __len__(self) -> int:\n return len(self.image_sizes)\n\n def __getitem__(self, idx) -> torch.Tensor:\n \"\"\"\n Access the individual image in its original size.\n\n Args:\n idx: int or slice\n\n Returns:\n Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1\n \"\"\"\n size = self.image_sizes[idx]\n return self.tensor[idx, ..., : size[0], : size[1]]\n\n @torch.jit.unused\n def to(self, *args: Any, **kwargs: Any) -> \"ImageList\":\n cast_tensor = self.tensor.to(*args, **kwargs)\n return ImageList(cast_tensor, self.image_sizes)\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n @staticmethod\n def from_tensors(\n tensors: List[torch.Tensor],\n size_divisibility: int = 0,\n pad_value: float = 0.0,\n padding_constraints: Optional[Dict[str, int]] = None,\n ) -> \"ImageList\":\n \"\"\"\n Args:\n tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or\n (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded\n to the same shape with `pad_value`.\n size_divisibility (int): If `size_divisibility > 0`, add padding to ensure\n the common height and width is divisible by `size_divisibility`.\n This depends on the model and many models need a divisibility of 32.\n pad_value (float): value to pad.\n padding_constraints (optional[Dict]): If given, it would follow the format as\n {\"size_divisibility\": int, \"square_size\": int}, where `size_divisibility` will\n overwrite the above one if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n Returns:\n an `ImageList`.\n \"\"\"\n assert len(tensors) > 0\n assert isinstance(tensors, (tuple, list))\n for t in tensors:\n assert isinstance(t, torch.Tensor), type(t)\n assert t.shape[:-2] == tensors[0].shape[:-2], t.shape\n\n image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]\n image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]\n max_size = torch.stack(image_sizes_tensor).max(0).values\n\n if padding_constraints is not None:\n square_size = padding_constraints.get(\"square_size\", 0)\n if square_size > 0:\n # pad to square.\n max_size[0] = max_size[1] = square_size\n if \"size_divisibility\" in padding_constraints:\n size_divisibility = padding_constraints[\"size_divisibility\"]\n if size_divisibility > 1:\n stride = size_divisibility\n # the last two dims are H,W, both subject to divisibility requirement\n max_size = (max_size + (stride - 1)).div(stride, rounding_mode=\"floor\") * stride\n\n # handle weirdness of scripting and tracing ...\n if torch.jit.is_scripting():\n max_size: List[int] = max_size.to(dtype=torch.long).tolist()\n else:\n if torch.jit.is_tracing():\n image_sizes = image_sizes_tensor\n\n if len(tensors) == 1:\n # This seems slightly (2%) faster.\n # TODO: check whether it's faster for 
multiple images as well\n image_size = image_sizes[0]\n padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]\n batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)\n else:\n # max_size can be a tensor in tracing mode, therefore convert to list\n batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)\n device = (\n None if torch.jit.is_scripting() else (\"cpu\" if torch.jit.is_tracing() else None)\n )\n batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device)\n batched_imgs = move_device_like(batched_imgs, tensors[0])\n for i, img in enumerate(tensors):\n # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`\n # Tracing mode cannot capture `copy_()` of temporary locals\n batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)\n\n return ImageList(batched_imgs.contiguous(), image_sizes)"
},
{
"identifier": "Instances",
"path": "nativedancer/third_part/detectron2/structures/instances.py",
"snippet": "class Instances:\n \"\"\"\n This class represents a list of instances in an image.\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\n All fields must have the same ``__len__`` which is the number of instances.\n\n All other (non-field) attributes of this class are considered private:\n they must start with '_' and are not modifiable by a user.\n\n Some basic usage:\n\n 1. Set/get/check a field:\n\n .. code-block:: python\n\n instances.gt_boxes = Boxes(...)\n print(instances.pred_masks) # a tensor of shape (N, H, W)\n print('gt_masks' in instances)\n\n 2. ``len(instances)`` returns the number of instances\n 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields\n and returns a new :class:`Instances`.\n Typically, ``indices`` is a integer vector of indices,\n or a binary mask of length ``num_instances``\n\n .. code-block:: python\n\n category_3_detections = instances[instances.pred_classes == 3]\n confident_detections = instances[instances.scores > 0.9]\n \"\"\"\n\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\n \"\"\"\n Args:\n image_size (height, width): the spatial size of the image.\n kwargs: fields to add to this `Instances`.\n \"\"\"\n self._image_size = image_size\n self._fields: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.set(k, v)\n\n @property\n def image_size(self) -> Tuple[int, int]:\n \"\"\"\n Returns:\n tuple: height, width\n \"\"\"\n return self._image_size\n\n def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)\n\n def __getattr__(self, name: str) -> Any:\n if name == \"_fields\" or name not in self._fields:\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\n return self._fields[name]\n\n def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n with warnings.catch_warnings(record=True):\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value\n\n def has(self, name: str) -> bool:\n \"\"\"\n Returns:\n bool: whether the field called `name` exists.\n \"\"\"\n return name in self._fields\n\n def remove(self, name: str) -> None:\n \"\"\"\n Remove the field called `name`.\n \"\"\"\n del self._fields[name]\n\n def get(self, name: str) -> Any:\n \"\"\"\n Returns the field called `name`.\n \"\"\"\n return self._fields[name]\n\n def get_fields(self) -> Dict[str, Any]:\n \"\"\"\n Returns:\n dict: a dict which maps names (str) to data of the fields\n\n Modifying the returned dict will modify this instance.\n \"\"\"\n return self._fields\n\n # Tensor-like methods\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\n \"\"\"\n Returns:\n Instances: all fields are called with a `to(device)`, if the field has this method.\n \"\"\"\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n if hasattr(v, \"to\"):\n v = v.to(*args, **kwargs)\n ret.set(k, v)\n return ret\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\n \"\"\"\n Args:\n item: an index-like object and will be used to index all the fields.\n\n Returns:\n If `item` is a string, return the data in the corresponding field.\n 
Otherwise, returns an `Instances` where all fields are indexed by `item`.\n \"\"\"\n if type(item) == int:\n if item >= len(self) or item < -len(self):\n raise IndexError(\"Instances index out of range!\")\n else:\n item = slice(item, None, len(self))\n\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n ret.set(k, v[item])\n return ret\n\n def __len__(self) -> int:\n for v in self._fields.values():\n # use __len__ because len() has to be int and is not friendly to tracing\n return v.__len__()\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\n\n def __iter__(self):\n raise NotImplementedError(\"`Instances` object is not iterable!\")\n\n @staticmethod\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\n \"\"\"\n Args:\n instance_lists (list[Instances])\n\n Returns:\n Instances\n \"\"\"\n assert all(isinstance(i, Instances) for i in instance_lists)\n assert len(instance_lists) > 0\n if len(instance_lists) == 1:\n return instance_lists[0]\n\n image_size = instance_lists[0].image_size\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\n for i in instance_lists[1:]:\n assert i.image_size == image_size\n ret = Instances(image_size)\n for k in instance_lists[0]._fields.keys():\n values = [i.get(k) for i in instance_lists]\n v0 = values[0]\n if isinstance(v0, torch.Tensor):\n values = torch.cat(values, dim=0)\n elif isinstance(v0, list):\n values = list(itertools.chain(*values))\n elif hasattr(type(v0), \"cat\"):\n values = type(v0).cat(values)\n else:\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\n ret.set(k, values)\n return ret\n\n def __str__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self))\n s += \"image_height={}, \".format(self._image_size[0])\n s += \"image_width={}, \".format(self._image_size[1])\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\n return s\n\n __repr__ = __str__"
},
{
"identifier": "BitMasks",
"path": "nativedancer/third_part/detectron2/structures/masks.py",
"snippet": "class BitMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in\n the form of bitmaps.\n\n Attributes:\n tensor: bool Tensor of N,H,W, representing N instances in the image.\n \"\"\"\n\n def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):\n \"\"\"\n Args:\n tensor: bool Tensor of N,H,W, representing N instances in the image.\n \"\"\"\n if isinstance(tensor, torch.Tensor):\n tensor = tensor.to(torch.bool)\n else:\n tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device(\"cpu\"))\n assert tensor.dim() == 3, tensor.size()\n self.image_size = tensor.shape[1:]\n self.tensor = tensor\n\n @torch.jit.unused\n def to(self, *args: Any, **kwargs: Any) -> \"BitMasks\":\n return BitMasks(self.tensor.to(*args, **kwargs))\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n @torch.jit.unused\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"BitMasks\":\n \"\"\"\n Returns:\n BitMasks: Create a new :class:`BitMasks` by indexing.\n\n The following usage are allowed:\n\n 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.\n 2. `new_masks = masks[2:10]`: return a slice of masks.\n 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor\n with `length = len(masks)`. Nonzero elements in the vector will be selected.\n\n Note that the returned object might share storage with this object,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return BitMasks(self.tensor[item].unsqueeze(0))\n m = self.tensor[item]\n assert m.dim() == 3, \"Indexing on BitMasks with {} returns a tensor with shape {}!\".format(\n item, m.shape\n )\n return BitMasks(m)\n\n @torch.jit.unused\n def __iter__(self) -> torch.Tensor:\n yield from self.tensor\n\n @torch.jit.unused\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.tensor))\n return s\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor: a BoolTensor which represents\n whether each mask is empty (False) or non-empty (True).\n \"\"\"\n return self.tensor.flatten(1).any(dim=1)\n\n @staticmethod\n def from_polygon_masks(\n polygon_masks: Union[\"PolygonMasks\", List[List[np.ndarray]]], height: int, width: int\n ) -> \"BitMasks\":\n \"\"\"\n Args:\n polygon_masks (list[list[ndarray]] or PolygonMasks)\n height, width (int)\n \"\"\"\n if isinstance(polygon_masks, PolygonMasks):\n polygon_masks = polygon_masks.polygons\n masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]\n if len(masks):\n return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))\n else:\n return BitMasks(torch.empty(0, height, width, dtype=torch.bool))\n\n @staticmethod\n def from_roi_masks(roi_masks: \"ROIMasks\", height: int, width: int) -> \"BitMasks\":\n \"\"\"\n Args:\n roi_masks:\n height, width (int):\n \"\"\"\n return roi_masks.to_bitmasks(height, width)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each bitmask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n It has less reconstruction error compared to rasterization with polygons.\n However we observe no difference in accuracy,\n but BitMasks requires more memory to store all the masks.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes 
for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor:\n A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n device = self.tensor.device\n\n batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]\n rois = torch.cat([batch_inds, boxes], dim=1) # Nx5\n\n bit_masks = self.tensor.to(dtype=torch.float32)\n rois = rois.to(device=device)\n output = (\n ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)\n .forward(bit_masks[:, None, :, :], rois)\n .squeeze(1)\n )\n output = output >= 0.5\n return output\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around bitmasks.\n If a mask is empty, it's bounding box will be all zero.\n \"\"\"\n boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)\n x_any = torch.any(self.tensor, dim=1)\n y_any = torch.any(self.tensor, dim=2)\n for idx in range(self.tensor.shape[0]):\n x = torch.where(x_any[idx, :])[0]\n y = torch.where(y_any[idx, :])[0]\n if len(x) > 0 and len(y) > 0:\n boxes[idx, :] = torch.as_tensor(\n [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32\n )\n return Boxes(boxes)\n\n @staticmethod\n def cat(bitmasks_list: List[\"BitMasks\"]) -> \"BitMasks\":\n \"\"\"\n Concatenates a list of BitMasks into a single BitMasks\n\n Arguments:\n bitmasks_list (list[BitMasks])\n\n Returns:\n BitMasks: the concatenated BitMasks\n \"\"\"\n assert isinstance(bitmasks_list, (list, tuple))\n assert len(bitmasks_list) > 0\n assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)\n\n cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))\n return cat_bitmasks"
},
{
"identifier": "get_event_storage",
"path": "nativedancer/third_part/detectron2/utils/events.py",
"snippet": "def get_event_storage():\n \"\"\"\n Returns:\n The :class:`EventStorage` object that's currently being used.\n Throws an error if no :class:`EventStorage` is currently enabled.\n \"\"\"\n assert len(\n _CURRENT_STORAGE_STACK\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\n return _CURRENT_STORAGE_STACK[-1]"
},
{
"identifier": "Backbone",
"path": "nativedancer/third_part/detectron2/modeling/backbone/backbone.py",
"snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any subclass can specify its own set of arguments.\n \"\"\"\n super().__init__()\n\n @abstractmethod\n def forward(self):\n \"\"\"\n Subclasses must override this method, but adhere to the same return type.\n\n Returns:\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\n \"\"\"\n pass\n\n @property\n def size_divisibility(self) -> int:\n \"\"\"\n Some backbones require the input height and width to be divisible by a\n specific integer. This is typically true for encoder / decoder type networks\n with lateral connection (e.g., FPN) for which feature maps need to match\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\n input size divisibility is required.\n \"\"\"\n return 0\n\n @property\n def padding_constraints(self) -> Dict[str, int]:\n \"\"\"\n This property is a generalization of size_divisibility. Some backbones and training\n recipes require specific padding constraints, such as enforcing divisibility by a specific\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\n in :paper:vitdet). `padding_constraints` contains these optional items like:\n {\n \"size_divisibility\": int,\n \"square_size\": int,\n # Future options are possible\n }\n `size_divisibility` will read from here if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\n \"\"\"\n return {}\n\n def output_shape(self):\n \"\"\"\n Returns:\n dict[str->ShapeSpec]\n \"\"\"\n # this is a backward-compatible default\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }"
}
] | import itertools
import logging
import numpy as np
import torch
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn
from ..layers import ShapeSpec
from ..structures import BitMasks, Boxes, ImageList, Instances
from ..utils.events import get_event_storage
from .backbone import Backbone
from mmcv.utils import ConfigDict
from mmdet.models import build_backbone
from mmdet.models import build_neck
from mmdet.models import build_detector
from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks | 9,579 | """
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
"""
super().__init__()
if isinstance(detector, Mapping):
detector = build_detector(_to_container(detector))
self.detector = detector
self.detector.init_weights()
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
scale_factor = np.array(
[w / input["width"], h / input["height"]] * 2, dtype="float32"
)
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
def convert_mask(m, shape):
# convert detectron2 masks (BitMasks / PolygonMasks) into mmdet's BitmapMasks / PolygonMasks containers
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
else:
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
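# --- Illustrative sketch (not part of the original detectron2 file) ---
# MMDetDetector.forward expects detectron2-style `batched_inputs`: a list of per-image dicts,
# each holding a CHW float "image" tensor and, optionally, the original "height"/"width"
# (which triggers rescaling of the outputs) and training "instances".
# A minimal, hypothetical helper showing how such an input could be assembled:
def _example_batched_inputs(height: int = 480, width: int = 640):
    image = torch.randint(0, 256, (3, height, width), dtype=torch.uint8).float()
    return [{"image": image, "height": height, "width": width}]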
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0]
else:
bbox_result, segm_result = result, None
bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5
bboxes, scores = bboxes[:, :4], bboxes[:, -1]
labels = [
torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
]
labels = torch.cat(labels)
inst = Instances(shape)
| # Copyright (c) Facebook, Inc. and its affiliates.
logger = logging.getLogger(__name__)
def _to_container(cfg):
"""
mmdet asserts that configs are plain dicts/lists, so convert omegaconf
objects into plain containers before handing them to mmdet's builders.
"""
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
return ConfigDict(cfg)
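# --- Illustrative sketch (not part of the original file) ---
# `_to_container` bridges the two config systems: an omegaconf DictConfig is resolved into
# plain dicts/lists and wrapped in mmcv's ConfigDict, which is what mmdet's build_* helpers
# expect. A hypothetical round trip (names below are made up for illustration):
def _example_to_container():
    lazy_cfg = OmegaConf.create({"type": "ResNet", "depth": 50})
    mm_cfg = _to_container(lazy_cfg)
    assert mm_cfg.type == "ResNet" and mm_cfg.depth == 50
    return mm_cfg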
class MMDetBackbone(Backbone):
"""
Wrapper of mmdetection backbones to use in detectron2.
mmdet backbones produce list/tuple of tensors, while detectron2 backbones
produce a dict of tensors. This class wraps the given backbone to produce
output in detectron2's convention, so it can be used in place of detectron2
backbones.
"""
def __init__(
self,
backbone: Union[nn.Module, Mapping],
neck: Union[nn.Module, Mapping, None] = None,
*,
output_shapes: List[ShapeSpec],
output_names: Optional[List[str]] = None,
):
"""
Args:
backbone: either a backbone module or a mmdet config dict that defines a
backbone. The backbone takes a 4D image tensor and returns a
sequence of tensors.
neck: either a backbone module or a mmdet config dict that defines a
neck. The neck takes outputs of backbone and returns a
sequence of tensors. If None, no neck is used.
output_shapes: shape for every output of the backbone (or neck, if given).
stride and channels are often needed.
output_names: names for every output of the backbone (or neck, if given).
By default, will use "out0", "out1", ...
"""
super().__init__()
if isinstance(backbone, Mapping):
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
neck = build_neck(_to_container(neck))
self.neck = neck
# "Neck" weights, if any, are part of neck itself. This is the interface
# of mmdet so we follow it. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
logger.info("Initializing mmdet backbone weights...")
self.backbone.init_weights()
# train() in mmdet modules is non-trivial, and has to be explicitly
# called. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
self.backbone.train()
if self.neck is not None:
logger.info("Initializing mmdet neck weights ...")
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if not output_names:
output_names = [f"out{i}" for i in range(len(output_shapes))]
self._output_names = output_names
def forward(self, x) -> Dict[str, Tensor]:
outs = self.backbone(x)
if self.neck is not None:
outs = self.neck(outs)
assert isinstance(
outs, (list, tuple)
), "mmdet backbone should return a list/tuple of tensors!"
if len(outs) != len(self._output_shapes):
raise ValueError(
"Length of output_shapes does not match outputs from the mmdet backbone: "
f"{len(outs)} != {len(self._output_shapes)}"
)
return {k: v for k, v in zip(self._output_names, outs)}
def output_shape(self) -> Dict[str, ShapeSpec]:
return {k: v for k, v in zip(self._output_names, self._output_shapes)}
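# --- Illustrative sketch (not part of the original file) ---
# MMDetBackbone accepts either an instantiated mmdet module or a plain config mapping; in the
# latter case mmdet's build_backbone is called internally. The ResNet-50 keys below follow
# common mmdet configs and are an assumption -- exact fields depend on the installed mmdet version.
def _example_mmdet_backbone():
    resnet_cfg = dict(
        type="ResNet",
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        norm_cfg=dict(type="BN", requires_grad=True),
    )
    shapes = [ShapeSpec(channels=c, stride=s) for c, s in zip((256, 512, 1024, 2048), (4, 8, 16, 32))]
    return MMDetBackbone(resnet_cfg, output_shapes=shapes, output_names=["res2", "res3", "res4", "res5"])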
class MMDetDetector(nn.Module):
"""
Wrapper of a mmdetection detector model, for detection and instance segmentation.
Input/output formats of this class follow detectron2's convention, so a
mmdetection model can be trained and evaluated in detectron2.
"""
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
"""
super().__init__()
if isinstance(detector, Mapping):
detector = build_detector(_to_container(detector))
self.detector = detector
self.detector.init_weights()
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
scale_factor = np.array(
[w / input["width"], h / input["height"]] * 2, dtype="float32"
)
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
def convert_mask(m, shape):
# convert detectron2 masks (BitMasks / PolygonMasks) into mmdet's BitmapMasks / PolygonMasks containers
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
else:
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
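# --- Illustrative sketch (not part of the original file) ---
# MMDetDetector is typically built from a full mmdet detector config (passed as a Mapping and
# built via build_detector inside __init__) plus per-channel pixel statistics. The mean/std
# values below are the usual ImageNet RGB statistics and are an assumption -- use whatever the
# chosen mmdet config was trained with.
def _example_mmdet_detector(detector_cfg: dict):
    return MMDetDetector(
        detector_cfg,
        size_divisibility=32,
        pixel_mean=(123.675, 116.28, 103.53),
        pixel_std=(58.395, 57.12, 57.375),
    )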
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0]
else:
bbox_result, segm_result = result, None
bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5
bboxes, scores = bboxes[:, :4], bboxes[:, -1]
labels = [
torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
]
labels = torch.cat(labels)
inst = Instances(shape) | inst.pred_boxes = Boxes(bboxes) | 1 | 2023-12-10 20:14:00+00:00 | 12k |
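A brief technical aside on the helper above: `_convert_mmdet_result` consumes mmdet's per-image
detection output, i.e. a list with one (N_i, 5) float array per class whose columns are
x1, y1, x2, y2, score (optionally paired with per-class mask lists). The toy values below are
purely illustrative:

import numpy as np
bbox_result = [
    np.array([[10.0, 20.0, 110.0, 220.0, 0.9]], dtype=np.float32),  # class 0: one detection
    np.zeros((0, 5), dtype=np.float32),                             # class 1: no detections
]
# np.vstack(bbox_result) gives the Nx5 array used above; each row's class index becomes its label.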
ethanweber/nerfiller | nerfiller/scripts/inpaint_nerfstudio_dataset.py | [
{
"identifier": "RGBInpainter",
"path": "nerfiller/inpaint/rgb_inpainter.py",
"snippet": "class RGBInpainter:\n \"\"\"\n Module for inpainting with the stable diffusion inpainting pipeline.\n \"\"\"\n\n def __init__(\n self,\n half_precision_weights: bool = True,\n lora_model_path: Optional[str] = None,\n device: str = \"cuda:0\",\n vae_device: str = \"cuda:0\",\n pipeline_name: str = \"stabilityai/stable-diffusion-2-inpainting\",\n ):\n print(f\"Loading RGB Inpainter ...\")\n\n self.half_precision_weights = half_precision_weights\n self.lora_model_path = lora_model_path\n self.device = device\n self.vae_device = vae_device\n self.dtype = torch.float16 if self.half_precision_weights else torch.float32\n self.pipeline_name = pipeline_name\n self.set_pipe()\n self.setup()\n\n def set_pipe(self):\n pipe_kwargs = {\n \"safety_checker\": None,\n \"feature_extractor\": None,\n \"requires_safety_checker\": False,\n \"torch_dtype\": self.dtype,\n }\n self.pipe = StableDiffusionInpaintPipeline.from_pretrained(\n self.pipeline_name,\n **pipe_kwargs,\n )\n\n def setup(self):\n # Load LoRA\n if self.lora_model_path:\n self.pipe.load_lora_weights(self.lora_model_path)\n print(f\"Loaded LoRA model from {self.lora_model_path}\")\n\n self.tokenizer = self.pipe.tokenizer\n self.text_encoder = self.pipe.text_encoder.to(self.device).eval()\n\n self.unet = self.pipe.unet.to(self.device).eval()\n self.vae = self.pipe.vae.to(self.vae_device).eval()\n\n self.vae_scale_factor = 2 ** (len(self.pipe.vae.config.block_out_channels) - 1)\n self.vae_latent_channels = self.pipe.vae.config.latent_channels\n\n # self.scheduler = DDPMScheduler.from_config(self.pipe.scheduler.config)\n self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)\n self.num_train_timesteps = self.scheduler.num_train_timesteps\n self.alphas = self.scheduler.alphas_cumprod.to(self.device)\n\n del self.pipe\n cleanup()\n\n print(f\"Loaded RGB inpainter!\")\n\n def compute_text_embeddings(self, prompt: str, negative_prompt: str):\n \"\"\"Get the text embeddings for a string.\"\"\"\n assert self.tokenizer is not None\n assert self.text_encoder is not None\n with torch.no_grad():\n text_inputs = tokenize_prompt(self.tokenizer, prompt, tokenizer_max_length=None)\n prompt_embeds = encode_prompt(\n self.text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n negative_text_inputs = tokenize_prompt(self.tokenizer, negative_prompt, tokenizer_max_length=None)\n negative_prompt_embeds = encode_prompt(\n self.text_encoder,\n negative_text_inputs.input_ids,\n negative_text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n\n return [prompt_embeds, negative_prompt_embeds]\n\n def destroy_text_encoder(self) -> None:\n \"\"\"Delete the text modules to save on memory.\"\"\"\n del self.tokenizer\n del self.text_encoder\n cleanup()\n\n def forward_unet(\n self,\n sample,\n t,\n text_embeddings,\n denoise_in_grid: bool = False,\n ):\n # process embeddings\n prompt_embeds, negative_prompt_embeds = text_embeddings\n\n batch_size = sample.shape[0] // 3\n\n prompt_embeds = torch.cat(\n [\n prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n ]\n )\n\n if denoise_in_grid:\n grid_sample = make_grid(sample)\n grid_prompt_embeds = prompt_embeds[:3].repeat(grid_sample.shape[0] // 3, 1, 1)\n noise_pred = self.unet(\n sample=grid_sample,\n timestep=t,\n encoder_hidden_states=grid_prompt_embeds,\n return_dict=False,\n )[0]\n noise_pred = undo_grid(noise_pred)\n else:\n 
noise_pred = self.unet(\n sample=sample,\n timestep=t,\n encoder_hidden_states=prompt_embeds,\n return_dict=False,\n )[0]\n return noise_pred\n\n def get_noise_pred(\n self,\n t,\n model_input: ModelInput,\n text_embeddings,\n text_guidance_scale: float = 0.0,\n image_guidance_scale: float = 0.0,\n denoise_in_grid: bool = False,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n only_noise_pred: bool = False,\n ):\n assert self.scheduler.config.prediction_type == \"epsilon\", \"We assume the model predicts epsilon.\"\n\n batch_size = model_input.latents.shape[0]\n value = torch.zeros_like(model_input.latents)\n count = torch.zeros_like(model_input.latents)\n\n for i in range(multidiffusion_steps):\n if randomize_latents:\n indices = torch.randperm(batch_size)\n else:\n indices = torch.arange(batch_size)\n\n if denoise_in_grid and randomize_within_grid:\n for j in range(0, len(indices), 4):\n indices[j : j + 4] = indices[j : j + 4][torch.randperm(4)]\n\n latents = model_input.latents[indices]\n latents_mask = model_input.latents_mask[indices]\n latents_mask_uncond = model_input.latents_mask_uncond[indices]\n masked_image_latents = model_input.masked_image_latents[indices]\n masked_image_latents_uncond = model_input.masked_image_latents_uncond[indices]\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents, latents, latents])\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n latents_mask_input = torch.cat([latents_mask, latents_mask, latents_mask_uncond])\n masked_image_latents_input = torch.cat(\n [\n masked_image_latents,\n masked_image_latents,\n masked_image_latents_uncond,\n ]\n )\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input_cat = torch.cat(\n [latent_model_input, latents_mask_input, masked_image_latents_input],\n dim=1,\n )\n\n # TODO: save compute by skipping some text encodings if not using them in CFG\n\n noise_pred_all = self.forward_unet(\n sample=latent_model_input_cat,\n t=t,\n text_embeddings=text_embeddings,\n denoise_in_grid=denoise_in_grid,\n )\n\n noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred_all.chunk(3)\n\n noise_pred = (\n noise_pred_image\n + text_guidance_scale * (noise_pred_text - noise_pred_image)\n + image_guidance_scale * (noise_pred_image - noise_pred_uncond)\n )\n\n if multidiffusion_type == \"v_prediction\":\n v_prediction = get_v_prediction_from_epsilon(noise_pred, t, latents, self.scheduler.alphas_cumprod)\n value[indices] += v_prediction\n count[indices] += 1\n elif multidiffusion_type == \"epsilon\":\n value[indices] += noise_pred\n count[indices] += 1\n else:\n raise ValueError(\"Not implemented.\")\n\n # take the MultiDiffusion step\n final_noise_pred = torch.where(count > 0, value / count, value)\n\n if multidiffusion_type == \"v_prediction\":\n final_noise_pred = get_epsilon_from_v_prediction(\n final_noise_pred,\n t.item(),\n model_input.latents,\n self.scheduler.alphas_cumprod,\n )\n elif multidiffusion_type == \"epsilon\":\n pass\n else:\n raise ValueError(\"Not implemented.\")\n\n if only_noise_pred:\n return None, None, final_noise_pred\n\n scheduler_output = self.scheduler.step(final_noise_pred, t, model_input.latents, generator=generator)\n pred_prev_sample = scheduler_output.prev_sample\n pred_original_sample = 
scheduler_output.pred_original_sample\n\n assert not pred_prev_sample.isnan().any()\n assert not pred_original_sample.isnan().any()\n return pred_prev_sample, pred_original_sample, final_noise_pred\n\n def get_model_input(\n self,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_timestep: Optional[int] = None,\n keep_grad: bool = False,\n ) -> ModelInput:\n \"\"\"Returns the inputs for the unet.\"\"\"\n\n # TODO: incorporate seeds\n\n batch_size, _, height, width = image.shape\n\n noise = randn_tensor(\n shape=(\n batch_size,\n self.vae_latent_channels,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n ),\n generator=generator,\n device=torch.device(self.device),\n dtype=self.dtype,\n )\n if starting_image is not None:\n assert starting_timestep is not None\n if keep_grad:\n latents = self.encode_images(starting_image)\n else:\n with torch.no_grad():\n latents = self.encode_images(starting_image)\n latents = self.scheduler.add_noise(latents, noise, starting_timestep)\n else:\n latents = noise\n\n latents_mask = torch.nn.functional.interpolate(\n mask,\n size=(height // self.vae_scale_factor, width // self.vae_scale_factor),\n mode=\"nearest\",\n )\n assert len(torch.unique(latents_mask)) <= 2\n latents_mask = latents_mask.to(device=self.device, dtype=self.dtype)\n assert len(torch.unique(mask)) <= 2\n masked_image = torch.where(mask == 0, image, 0.5)\n with torch.no_grad():\n masked_image_latents = self.encode_images(masked_image)\n\n latents_mask_uncond = torch.ones_like(latents_mask)\n masked_image_uncond = torch.ones_like(masked_image) * 0.5\n with torch.no_grad():\n masked_image_latents_uncond = self.encode_images(masked_image_uncond)\n\n model_input = ModelInput(\n latents.to(device=self.device, dtype=self.dtype),\n latents_mask.to(device=self.device, dtype=self.dtype),\n masked_image_latents.to(device=self.device, dtype=self.dtype),\n latents_mask_uncond.to(device=self.device, dtype=self.dtype),\n masked_image_latents_uncond.to(device=self.device, dtype=self.dtype),\n noise.to(device=self.device, dtype=self.dtype),\n )\n\n return model_input\n\n def get_loss(\n self,\n x0: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n output_folder: Optional[Path] = None,\n step: int = 0,\n guidance_step: int = 0,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n ):\n \"\"\"Losses on the VAE decoded images x0.\n The multi-view loss is applied where mask == 0.0 (regions that have known depth).\n \"\"\"\n\n loss = 0.0\n\n if multiview_guidance_scale != 0.0:\n features = feature_extractor(x0.to(feature_extractor.device)).to(self.device)\n\n # multiview guidance\n scale_factor = features.shape[-1] / x0.shape[-1]\n K_scaled = rescale_intrinsics(K, scale_factor, scale_factor)\n mask_scaled = 1.0 - torch.nn.functional.interpolate(mask, scale_factor=scale_factor, mode=\"nearest\")\n depth_scaled = torch.nn.functional.interpolate(depth, scale_factor=scale_factor, mode=\"bilinear\")\n for cam1 in range(len(c2w)):\n for cam2 in range(cam1 + 
1, len(c2w)):\n loss_mv, loss_dict = multiview_metric(\n features1=features[cam1 : cam1 + 1],\n features2=features[cam2 : cam2 + 1],\n K1=K_scaled[cam1 : cam1 + 1],\n K2=K_scaled[cam2 : cam2 + 1],\n c2w1=c2w[cam1 : cam1 + 1],\n c2w2=c2w[cam2 : cam2 + 1],\n image1=x0[cam1 : cam1 + 1],\n image2=x0[cam2 : cam2 + 1],\n mask1=mask_scaled[cam1 : cam1 + 1],\n mask2=mask_scaled[cam2 : cam2 + 1],\n depth1=depth_scaled[cam1 : cam1 + 1],\n depth2=depth_scaled[cam2 : cam2 + 1],\n output_folder=output_folder if (cam1 == 0 and guidance_step == 0) else None,\n suffix=f\"-{step:06d}-{cam1:06d}-{cam2:06d}-{guidance_step:06d}\",\n )\n loss += multiview_guidance_scale * loss_mv.sum()\n\n if reconstruction_guidance_scale != 0.0:\n loss += (\n reconstruction_guidance_scale * (((starting_image.to(x0.device) - x0) * mask.to(x0.device)) ** 2).mean()\n )\n\n return loss\n\n @torch.cuda.amp.autocast(enabled=True)\n def get_image(\n self,\n text_embeddings,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n num_inference_steps: int = 20,\n denoise_in_grid: bool = False,\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n use_decoder_approximation: bool = False,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n show_multiview: bool = False,\n guidance_steps: List[int] = [5],\n num_guidance_steps: int = 10,\n classifier_guidance_scale: float = 0.0,\n output_folder: Optional[Path] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_lower_bound: Optional[float] = None,\n starting_upper_bound: Optional[float] = None,\n classifier_guidance_loss_rescale=1000.0,\n classifier_guidance_start_step: int = 0,\n replace_original_pixels: bool = False,\n ) -> Float[Tensor, \"B 3 H W\"]:\n \"\"\"Run the denoising sampling process, also known as the reverse process.\n Inpaint where mask == 1.\n If output folder is not None, then save images to this folder.\n\n Args:\n text_embeddings: Either 2 per image (BB) or 2 total, which will use the same cond. and uncond. 
prompts for all.\n loss_rescale: To prevent fp16 underflow\n \"\"\"\n\n if output_folder:\n output_folder.mkdir(parents=True, exist_ok=True)\n\n batch_size, _, height, width = image.shape\n\n if starting_lower_bound is not None:\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n # select t, set multi-step diffusion\n T = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n self.scheduler.config.num_train_timesteps = T.item()\n else:\n self.scheduler.config.num_train_timesteps = self.num_train_timesteps\n\n self.scheduler.set_timesteps(num_inference_steps, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n # self.scheduler.config.num_train_timesteps == 1000 is equivalent to starting_lower_bound and starting_upper_bound both being 1\n # so start with full noise by setting this to None\n starting_image=starting_image if self.scheduler.config.num_train_timesteps != 1000 else None,\n starting_timestep=self.scheduler.timesteps[0],\n )\n\n if depth is None:\n depth = torch.zeros_like(mask)\n\n progress = Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TaskProgressColumn(),\n TimeElapsedColumn(),\n )\n task1 = progress.add_task(\n f\"[green]Inpainting batch of images...\",\n total=len(self.scheduler.timesteps),\n )\n\n with progress:\n for i, t in enumerate(self.scheduler.timesteps):\n start_time = time.time()\n\n # DragDiffusion style guidance (\"drag\")\n use_drag_guidance = (\n multiview_guidance_scale != 0.0 or reconstruction_guidance_scale != 0.0\n ) and i in guidance_steps\n if use_drag_guidance:\n model_input.latents = model_input.latents.to(torch.float32).detach().requires_grad_(True)\n scaler = torch.cuda.amp.GradScaler()\n optimizer = torch.optim.Adam([model_input.latents], lr=1e-2)\n for guidance_step in range(num_guidance_steps):\n _, pred_original_sample, _ = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=1,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if output_folder:\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(\n output_folder / f\"x0-{i:06d}-{guidance_step:06d}.png\",\n image_x0,\n )\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"drag_guidance\",\n step=i,\n guidance_step=guidance_step,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/drag_guidance_loss-{i}\": loss})\n\n optimizer.zero_grad()\n assert not loss.isnan().any()\n scaler.scale(loss).backward()\n\n assert not model_input.latents.grad.isnan().any()\n # print(\n # model_input.latents.grad.abs().mean(),\n # (model_input.latents.grad == 0.0).sum() / model_input.latents.grad.numel(),\n # )\n\n 
scaler.step(optimizer)\n assert not model_input.latents.isnan().any()\n assert not depth.isnan().any()\n scaler.update()\n\n # take a step\n use_classifier_guidance = classifier_guidance_scale != 0.0 and i >= classifier_guidance_start_step\n model_input.latents = (\n model_input.latents.to(self.dtype).detach().requires_grad_(use_classifier_guidance)\n )\n with torch.enable_grad() if use_classifier_guidance else torch.no_grad():\n _, pred_original_sample, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=multidiffusion_steps,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n\n # classifier guidance (\"classifier\")\n if use_classifier_guidance:\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"classifier_guidance\",\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/classifier_guidance_loss\": loss})\n\n grad = (\n torch.autograd.grad(\n classifier_guidance_loss_rescale * loss,\n model_input.latents,\n )[0]\n / classifier_guidance_loss_rescale\n )\n # print(\n # grad.abs().mean(),\n # (grad == 0.0).sum() / grad.numel(),\n # )\n noise_pred = noise_pred + classifier_guidance_scale * grad\n\n model_input.latents = model_input.latents.detach().requires_grad_(False)\n scheduler_output = self.scheduler.step(noise_pred, t, model_input.latents, generator=generator)\n model_input.latents = scheduler_output.prev_sample\n\n if output_folder:\n # save the denoised x0\n with torch.no_grad():\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if use_drag_guidance or use_classifier_guidance:\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=None,\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/loss\": loss})\n\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(output_folder / \"x0.png\", image_x0)\n mediapy.write_image(output_folder / f\"x0-{i:06d}.png\", image_x0)\n\n progress.update(task1, advance=1)\n end_time = time.time()\n # print(f\"[green]Time for iter {i}:\", end_time - start_time)\n\n if output_folder:\n output_filename = str(output_folder) + \".mp4\"\n CONSOLE.print(f\"[green]Saving video to {output_filename}\")\n save_video_from_path(\n path=output_folder,\n glob_str=\"x0*png\",\n sec=10,\n output_filename=output_filename,\n )\n\n with torch.no_grad():\n x0 = self.decode_latents(\n 
model_input.latents.detach(),\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n return x0\n\n def encode_images(self, imgs: Float[Tensor, \"B 3 512 512\"]) -> Float[Tensor, \"B 4 64 64\"]:\n imgs = imgs * 2.0 - 1.0\n sampled_posterior = self.vae.encode(imgs.to(self.vae_device), return_dict=False)[0].sample().to(self.device)\n latents = sampled_posterior * 0.18215\n return latents\n\n def decode_latents(\n self,\n latents: Float[Tensor, \"B 4 H W\"],\n use_decoder_approximation: bool = False,\n ) -> Float[Tensor, \"B 3 Hout Wout\"]:\n if use_decoder_approximation:\n da = get_decoder_approximation().to(latents)\n x = torch.nn.functional.interpolate(latents, scale_factor=self.vae_scale_factor, mode=\"bilinear\")\n x = torch.matmul(x.permute(0, 2, 3, 1), da).permute(0, 3, 1, 2)\n return x\n else:\n scaled_latents = 1 / 0.18215 * latents\n image = self.vae.decode(scaled_latents.to(self.vae_device), return_dict=False)[0].to(self.device)\n image = (image * 0.5 + 0.5).clamp(0, 1)\n return image\n\n def sds_loss(\n self,\n text_embeddings: Union[Float[Tensor, \"BB 77 768\"], Float[Tensor, \"2 77 768\"]],\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n starting_image: Float[Tensor, \"B 3 H W\"],\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n starting_lower_bound: float = 0.02,\n starting_upper_bound: float = 0.98,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ) -> torch.Tensor:\n \"\"\"Score Distilation Sampling loss proposed in DreamFusion paper (https://dreamfusion3d.github.io/)\n Args:\n text_embeddings: Text embeddings\n image: Rendered image\n mask: Mask, inpaint where 1\n text_guidance_scale: How much to weigh the guidance\n image_guidance_scale: How much to weigh the guidance\n Returns:\n The loss\n \"\"\"\n\n # NOTE: doesn't work for gridding right now\n\n batch_size, _, height, width = image.shape\n\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n\n t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n starting_image=starting_image,\n starting_timestep=t,\n keep_grad=True,\n )\n\n # predict the noise residual with unet, NO grad!\n with torch.no_grad():\n _, _, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n only_noise_pred=True,\n )\n\n # w(t), sigma_t^2\n w = 1 - self.alphas[t]\n\n grad = w * (noise_pred - model_input.noise)\n grad = torch.nan_to_num(grad)\n\n target = (model_input.latents - grad).detach()\n loss = (\n 0.5\n * torch.nn.functional.mse_loss(model_input.latents, target, reduction=\"sum\")\n / model_input.latents.shape[0]\n )\n\n return loss"
},
{
"identifier": "LaMaInpainter",
"path": "nerfiller/inpaint/lama_inpainter.py",
"snippet": "class LaMaInpainter:\n \"\"\"LaMa inpainter model.\"\"\"\n\n def __init__(self, device: str = \"cuda:0\", model_path: Path = Path(\"data/models/big-lama\")):\n print(f\"Loading LaMa inpainter ...\")\n\n self.device = device\n\n train_config_path = os.path.join(model_path, \"config.yaml\")\n with open(train_config_path, \"r\") as f:\n train_config = OmegaConf.create(yaml.safe_load(f))\n\n train_config.training_model.predict_only = True\n train_config.visualizer.kind = \"noop\"\n\n checkpoint_path = os.path.join(model_path, \"models\", \"best.ckpt\")\n\n self.model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location=\"cpu\")\n self.model.freeze()\n self.model.to(self.device)\n\n def get_image(self, image: Float[Tensor, \"B 3 H W\"], mask: Float[Tensor, \"B 1 H W\"]):\n with torch.no_grad():\n batch = {}\n batch[\"image\"] = image\n batch[\"mask\"] = mask\n batch = self.model(batch)\n inpainted_image = batch[\"inpainted\"]\n return inpainted_image"
},
{
"identifier": "parse_nerfstudio_frame",
"path": "nerfiller/nerf/dataset_utils.py",
"snippet": "def parse_nerfstudio_frame(\n transforms: Dict,\n data_path: Path,\n idx: int,\n depth_max: int = None,\n device: str = \"cpu\",\n size: Optional[Tuple[int, int]] = None,\n dtype=torch.float32,\n):\n \"\"\"Parses a Nerfstudio frame, where idx == 0 is the first image sorted by filename.\n The frames are not normally sorted, but we sort them before doing any operations.\n We return processed information where we load images, depth maps, and masks, useful for inpainting this dataset.\n Size will resize the image to (height, width).\n \"\"\"\n sorted_frames = sorted(transforms[\"frames\"], key=lambda x: x[\"file_path\"])\n imf = data_path / Path(sorted_frames[idx][\"file_path\"])\n image = torch.from_numpy(mediapy.read_image(imf) / 255.0).permute(2, 0, 1)[None].to(dtype).to(device)\n if \"mask_path\" in sorted_frames[idx]:\n maf = data_path / Path(sorted_frames[idx][\"mask_path\"])\n mask = 1 - torch.from_numpy(mediapy.read_image(maf) / 255.0)[None, None].to(dtype).to(device)\n else:\n mask = torch.zeros_like(image[:, :1])\n if \"depth_file_path\" in sorted_frames[idx]:\n daf = data_path / Path(sorted_frames[idx][\"depth_file_path\"])\n depth = torch.from_numpy(np.load(daf))[None, None].to(dtype).to(device)\n else:\n depth = torch.zeros_like(image[:, :1])\n # image *= 1 - mask\n # depth *= 1 - mask\n if depth_max:\n depth[depth > depth_max] = 0.0\n # check if the values are stored per frame\n if \"fl_x\" in sorted_frames[idx]:\n fx = sorted_frames[idx][\"fl_x\"]\n fy = sorted_frames[idx][\"fl_y\"]\n cx = sorted_frames[idx][\"cx\"]\n cy = sorted_frames[idx][\"cy\"]\n else:\n fx = transforms[\"fl_x\"]\n fy = transforms[\"fl_y\"]\n cx = transforms[\"cx\"]\n cy = transforms[\"cy\"]\n K = torch.tensor([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=torch.float32, device=device)\n c2wh = torch.tensor(sorted_frames[idx][\"transform_matrix\"]).to(torch.float32).to(device)\n c2w = c2wh[:3]\n w2ch = torch.inverse(c2wh)\n w2c = w2ch[:3]\n K = K[None]\n c2w = c2w[None]\n\n if size:\n scale_factor_x = size[1] / image.shape[-1]\n scale_factor_y = size[0] / image.shape[-2]\n image = torch.nn.functional.interpolate(image, size=size, mode=\"bilinear\")\n depth = torch.nn.functional.interpolate(depth, size=size, mode=\"bilinear\")\n mask = torch.nn.functional.interpolate(mask, size=size, mode=\"nearest\")\n K = rescale_intrinsics(K, scale_factor_x, scale_factor_y)\n\n return image, depth, mask, c2w, K"
},
{
"identifier": "get_inpainted_image_row",
"path": "nerfiller/utils/image_utils.py",
"snippet": "def get_inpainted_image_row(\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n inpainted_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n color: Tuple[float, float, float] = Colors.NEON_PINK.value,\n show_original: bool = False,\n):\n \"\"\"Returns an image concatenated along the x-axis. It has the following form:\n image with inpaint regions highlighted | image with inpainted regions\n Inpaint where mask == 1.\n The default color is neon pink.\n If the inpainted image is None, then just show the `image with inpaint regions highlighted`.\n \"\"\"\n device = image.device\n c = torch.tensor(color, device=device).view(1, 3, 1, 1)\n color_image = torch.ones_like(image) * c\n image_with_highlights = torch.where(mask == 1, color_image, image)\n image_list = [image_with_highlights]\n if inpainted_image is not None:\n image_list = image_list + [inpainted_image]\n if show_original:\n image_list = [image] + image_list\n im = torch.cat(image_list, dim=-2)\n return im"
},
{
"identifier": "rescale_intrinsics",
"path": "nerfiller/utils/camera_utils.py",
"snippet": "def rescale_intrinsics(Ks: Float[Tensor, \"B 3 3 3\"], scale_factor_x: float, scale_factor_y: float):\n Ks_new = Ks.clone()\n Ks_new[:, 0:1] *= scale_factor_x\n Ks_new[:, 1:2] *= scale_factor_y\n return Ks_new"
},
{
"identifier": "InpaintConfig",
"path": "nerfiller/configs/inpaint.py",
"snippet": "class InpaintConfig:"
},
{
"identifier": "register_extended_attention",
"path": "nerfiller/utils/diff_utils.py",
"snippet": "def register_extended_attention(unet):\n \"\"\"Method from Tune-A-Video, but code modified from TokenFlow codebase.\"\"\"\n\n def sa_forward(self):\n to_out = self.to_out\n if type(to_out) is torch.nn.modules.container.ModuleList:\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n def forward(x, encoder_hidden_states=None, attention_mask=None):\n batch_size, sequence_length, dim = x.shape\n h = self.heads\n # Here we are making an assumption about passing in 3 varients of conditioning into the model\n n_frames = batch_size // 3\n is_cross = encoder_hidden_states is not None\n encoder_hidden_states = encoder_hidden_states if is_cross else x\n q = self.to_q(x)\n k = self.to_k(encoder_hidden_states)\n v = self.to_v(encoder_hidden_states)\n\n k_0 = k[:n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n k_1 = k[n_frames : 2 * n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n k_2 = k[2 * n_frames :].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_0 = v[:n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_1 = v[n_frames : 2 * n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_2 = v[2 * n_frames :].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n\n q_0 = self.head_to_batch_dim(q[:n_frames])\n q_1 = self.head_to_batch_dim(q[n_frames : 2 * n_frames])\n q_2 = self.head_to_batch_dim(q[2 * n_frames :])\n k_0 = self.head_to_batch_dim(k_0)\n k_1 = self.head_to_batch_dim(k_1)\n k_2 = self.head_to_batch_dim(k_2)\n v_0 = self.head_to_batch_dim(v_0)\n v_1 = self.head_to_batch_dim(v_1)\n v_2 = self.head_to_batch_dim(v_2)\n\n out_0 = []\n out_1 = []\n out_2 = []\n\n q_0 = q_0.view(n_frames, h, sequence_length, dim // h)\n k_0 = k_0.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_0 = v_0.view(n_frames, h, sequence_length * n_frames, dim // h)\n q_1 = q_1.view(n_frames, h, sequence_length, dim // h)\n k_1 = k_1.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_1 = v_1.view(n_frames, h, sequence_length * n_frames, dim // h)\n q_2 = q_2.view(n_frames, h, sequence_length, dim // h)\n k_2 = k_2.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_2 = v_2.view(n_frames, h, sequence_length * n_frames, dim // h)\n\n for j in range(h):\n sim_0 = torch.bmm(q_0[:, j], k_0[:, j].transpose(-1, -2)) * self.scale\n sim_1 = torch.bmm(q_1[:, j], k_1[:, j].transpose(-1, -2)) * self.scale\n sim_2 = torch.bmm(q_2[:, j], k_2[:, j].transpose(-1, -2)) * self.scale\n\n out_0.append(torch.bmm(sim_0.softmax(dim=-1), v_0[:, j]))\n out_1.append(torch.bmm(sim_1.softmax(dim=-1), v_1[:, j]))\n out_2.append(torch.bmm(sim_2.softmax(dim=-1), v_2[:, j]))\n\n out_0 = (\n torch.cat(out_0, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n out_1 = (\n torch.cat(out_1, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n out_2 = (\n torch.cat(out_2, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n\n out = torch.cat([out_0, out_1, out_2], dim=0)\n out = self.batch_to_head_dim(out)\n\n return to_out(out)\n\n return forward\n\n for _, unet_module in unet.named_modules():\n if isinstance_str(unet_module, \"BasicTransformerBlock\"):\n module = unet_module.attn1\n module.forward = sa_forward(module)\n\n res_dict = {1: [1, 
2], 2: [0, 1, 2], 3: [0, 1, 2]}\n # we are injecting attention in blocks 4 - 11 of the decoder, so not in the first block of the lowest resolution\n for res in res_dict:\n for block in res_dict[res]:\n module = unet.up_blocks[res].attentions[block].transformer_blocks[0].attn1\n module.forward = sa_forward(module)"
},
{
"identifier": "downscale_mask",
"path": "nerfiller/utils/mask_utils.py",
"snippet": "def downscale_mask(mask, size=None, scale_factor=None, dilate_iters=0, dilate_kernel_size=3):\n \"\"\"\n Downscale the mask in a conservative way. 1s are where to inpaint, 0 where to not inpaint.\n Inpaints extra pixels to prevent leakage under the mask.\n \"\"\"\n assert size or scale_factor\n if size:\n assert scale_factor is None\n if scale_factor:\n assert size is None\n for _ in range(dilate_iters):\n mask = dilate(mask, kernel_size=dilate_kernel_size)\n mask = torch.nn.functional.interpolate(mask, size=size, scale_factor=scale_factor, mode=\"bilinear\")\n mask = (mask != 0.0).float() # expands the mask slightly for no leakage of pixels\n return mask"
}
] | import json
import shutil
import mediapy
import torch
import tyro
import math
from pathlib import Path
from nerfiller.inpaint.rgb_inpainter import RGBInpainter
from nerfiller.inpaint.lama_inpainter import LaMaInpainter
from nerfiller.nerf.dataset_utils import parse_nerfstudio_frame
from nerfiller.utils.image_utils import get_inpainted_image_row
from nerfiller.utils.camera_utils import rescale_intrinsics
from nerfiller.configs.inpaint import InpaintConfig, AnnotatedBaseConfigUnion
from datetime import datetime
from nerfiller.utils.diff_utils import register_extended_attention
from nerfiller.utils.mask_utils import downscale_mask | 10,722 |
def main(
config: InpaintConfig,
):
"""
Inpaint a Nerfstudio dataset where the masks == 0.
"""
if config.method_name == "individual-lama":
rgb_inpainter = LaMaInpainter(device=config.device, model_path=Path("data/models/big-lama"))
else:
# Load the inpainting module.
rgb_inpainter = RGBInpainter(
half_precision_weights=config.half_precision_weights,
lora_model_path=config.lora_model_path,
device=config.device,
vae_device=config.vae_device,
)
if config.text_guidance_scale != 0.0:
assert config.prompt != "", "You need to set an actual prompt to use this method."
# Process the text prompts.
text_embeddings = rgb_inpainter.compute_text_embeddings(config.prompt, config.negative_prompt)
if config.use_expanded_attention:
|
def main(
config: InpaintConfig,
):
"""
Inpaint a Nerfstudio dataset where the masks == 0.
"""
if config.method_name == "individual-lama":
rgb_inpainter = LaMaInpainter(device=config.device, model_path=Path("data/models/big-lama"))
else:
# Load the inpainting module.
rgb_inpainter = RGBInpainter(
half_precision_weights=config.half_precision_weights,
lora_model_path=config.lora_model_path,
device=config.device,
vae_device=config.vae_device,
)
if config.text_guidance_scale != 0.0:
assert config.prompt != "", "You need to set an actual prompt to use this method."
# Process the text prompts.
text_embeddings = rgb_inpainter.compute_text_embeddings(config.prompt, config.negative_prompt)
if config.use_expanded_attention: | register_extended_attention(rgb_inpainter.unet) | 6 | 2023-12-07 19:12:08+00:00 | 12k |
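Two of the helpers pulled in by this script are easy to misuse, so here is a small illustrative
sketch (hypothetical shapes and values, not part of the repository): `downscale_mask` shrinks an
inpainting mask conservatively (optional dilation, bilinear resize, then every touched pixel is
kept as 1), and `rescale_intrinsics` scales the focal lengths and principal point when images are
resized.

import torch
from nerfiller.utils.mask_utils import downscale_mask
from nerfiller.utils.camera_utils import rescale_intrinsics

mask = torch.zeros(1, 1, 512, 512)
mask[:, :, 200:300, 200:300] = 1.0                      # 1 = region to inpaint
latent_mask = downscale_mask(mask, scale_factor=1 / 8)  # e.g. match a 64x64 latent grid
K = torch.tensor([[[500.0, 0.0, 256.0], [0.0, 500.0, 256.0], [0.0, 0.0, 1.0]]])
K_latent = rescale_intrinsics(K, 1 / 8, 1 / 8)          # rows 0 and 1 (fx, cx / fy, cy) are scaled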
nnanhuang/Customize-it-3D | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "VQModelInterface",
"path": "ldm/models/autoencoder.py",
"snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = 
torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n t_start=-1):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback: \n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, 
use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec"
},
{
"identifier": "CrossAttention",
"path": "ldm/modules/attention.py",
"snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head ** -0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim),\n nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, 'b ... -> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n return self.to_out(out)"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.attention import CrossAttention | 9,718 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key) | count_params(self.model, verbose=True) | 6 | 2023-12-14 11:03:35+00:00 | 12k |
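The LitEma snippet in the row above warms up its decay with min(decay, (1 + n) / (10 + n)) before updating the shadow copy via shadow -= (1 - decay) * (shadow - param). A minimal, self-contained sketch of that schedule and update follows; it is an annotation, not part of the dataset row, and the names ema_decay, shadow, and param plus the tensor values are illustrative assumptions rather than code from the repo.

import torch

def ema_decay(base_decay: float, num_updates: int) -> float:
    # Same warm-up as LitEma.forward: early updates use (1 + n) / (10 + n),
    # which grows toward and is then capped by the configured base decay.
    return min(base_decay, (1 + num_updates) / (10 + num_updates))

shadow = torch.zeros(3)   # EMA ("shadow") copy of a parameter buffer
param = torch.ones(3)     # current model parameter
for step in range(1, 6):
    d = ema_decay(0.9999, step)
    # LitEma performs this with an in-place sub_ on the registered buffer.
    shadow = shadow - (1.0 - d) * (shadow - param)
    print(step, round(d, 3), [round(v, 3) for v in shadow.tolist()])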
TaoHuang13/diffusion_reward | scripts/train_vqdiffusion.py | [
{
"identifier": "build_dataloader",
"path": "diffusion_reward/models/video_models/vqdiffusion/data/build.py",
"snippet": "def build_dataloader(config, args=None, return_dataset=True):\n dataset_cfg = config['dataloader']\n train_dataset = []\n for ds_cfg in dataset_cfg['train_datasets']:\n ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')\n ds = instantiate_from_config(ds_cfg)\n train_dataset.append(ds)\n if len(train_dataset) > 1:\n train_dataset = ConcatDataset(train_dataset)\n else:\n train_dataset = train_dataset[0]\n \n val_dataset = []\n for ds_cfg in dataset_cfg['validation_datasets']:\n ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')\n ds = instantiate_from_config(ds_cfg)\n val_dataset.append(ds)\n if len(val_dataset) > 1:\n val_dataset = ConcatDataset(val_dataset)\n else:\n val_dataset = val_dataset[0]\n \n if args is not None and args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)\n train_iters = len(train_sampler) // dataset_cfg['batch_size']\n val_iters = len(val_sampler) // dataset_cfg['batch_size']\n else:\n train_sampler = None\n val_sampler = None\n train_iters = len(train_dataset) // dataset_cfg['batch_size']\n val_iters = len(val_dataset) // dataset_cfg['batch_size']\n\n # if args is not None and not args.debug:\n # num_workers = max(2*dataset_cfg['batch_size'], dataset_cfg['num_workers'])\n # num_workers = min(64, num_workers)\n # else:\n # num_workers = dataset_cfg['num_workers']\n num_workers = dataset_cfg['num_workers']\n train_loader = torch.utils.data.DataLoader(train_dataset, \n batch_size=dataset_cfg['batch_size'], \n shuffle=(train_sampler is None),\n num_workers=num_workers, \n pin_memory=True, \n sampler=train_sampler, \n drop_last=True,\n persistent_workers=True)\n\n val_loader = torch.utils.data.DataLoader(val_dataset, \n batch_size=dataset_cfg['batch_size'], \n shuffle=False, #(val_sampler is None),\n num_workers=num_workers, \n pin_memory=True, \n sampler=val_sampler, \n drop_last=True,\n persistent_workers=True)\n\n dataload_info = {\n 'train_loader': train_loader,\n 'validation_loader': val_loader,\n 'train_iterations': train_iters,\n 'validation_iterations': val_iters\n }\n \n if return_dataset:\n dataload_info['train_dataset'] = train_dataset\n dataload_info['validation_dataset'] = val_dataset\n\n return dataload_info"
},
{
"identifier": "launch",
"path": "diffusion_reward/models/video_models/vqdiffusion/distributed/launch.py",
"snippet": "def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()):\n world_size = n_machine * n_gpu_per_machine\n\n if world_size > 1:\n # if \"OMP_NUM_THREADS\" not in os.environ:\n # os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n if dist_url == \"auto\":\n if n_machine != 1:\n raise ValueError('dist_url=\"auto\" not supported in multi-machine jobs')\n\n port = find_free_port()\n dist_url = f\"tcp://127.0.0.1:{port}\"\n\n if n_machine > 1 and dist_url.startswith(\"file://\"):\n raise ValueError(\n \"file:// is not a reliable init method in multi-machine jobs. Prefer tcp://\"\n )\n\n mp.spawn(\n distributed_worker,\n nprocs=n_gpu_per_machine,\n args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args),\n daemon=False,\n )\n\n else:\n local_rank = 0\n fn(local_rank, *args)"
},
{
"identifier": "Logger",
"path": "diffusion_reward/models/video_models/vqdiffusion/engine/logger.py",
"snippet": "class Logger(object):\n def __init__(self, args):\n self.args = args\n self.save_dir = args.save_dir\n self.is_primary = is_primary()\n \n if self.is_primary:\n # save the args and config\n self.config_dir = 'configs'\n os.makedirs(self.config_dir, exist_ok=True)\n file_name = os.path.join(self.config_dir, 'args.txt')\n write_args(args, file_name)\n\n log_dir = 'logs'\n if not os.path.exists(log_dir):\n os.makedirs(log_dir, exist_ok=True)\n self.text_writer = open(os.path.join(log_dir, 'log.txt'), 'a') # 'w')\n if args.tensorboard:\n self.log_info('using tensorboard')\n self.tb_writer = torch.utils.tensorboard.SummaryWriter(log_dir=log_dir) # tensorboard.SummaryWriter(log_dir=log_dir)\n else:\n self.tb_writer = None\n \n\n def save_config(self, config):\n if self.is_primary:\n save_config_to_yaml(config, os.path.join(self.config_dir, 'config.yaml'))\n\n def log_info(self, info, check_primary=True):\n if self.is_primary or (not check_primary):\n print(info)\n if self.is_primary:\n info = str(info)\n time_str = time.strftime('%Y-%m-%d-%H-%M')\n info = '{}: {}'.format(time_str, info)\n if not info.endswith('\\n'):\n info += '\\n'\n self.text_writer.write(info)\n self.text_writer.flush()\n\n def add_scalar(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_scalar(**kargs)\n\n def add_scalars(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_scalars(**kargs)\n\n def add_image(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_image(**kargs)\n\n def add_images(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_images(**kargs)\n\n\n def close(self):\n if self.is_primary:\n self.text_writer.close()\n self.tb_writer.close()"
},
{
"identifier": "Solver",
"path": "diffusion_reward/models/video_models/vqdiffusion/engine/solver.py",
"snippet": "class Solver(object):\n def __init__(self, config, args, model, dataloader, logger):\n self.config = config\n self.args = args\n self.model = model \n self.dataloader = dataloader\n self.logger = logger\n \n self.max_epochs = config['solver']['max_epochs']\n self.save_epochs = config['solver']['save_epochs']\n self.save_iterations = config['solver'].get('save_iterations', -1)\n self.sample_iterations = config['solver']['sample_iterations']\n if self.sample_iterations == 'epoch':\n self.sample_iterations = self.dataloader['train_iterations']\n self.validation_epochs = config['solver'].get('validation_epochs', 2)\n assert isinstance(self.save_epochs, (int, list))\n assert isinstance(self.validation_epochs, (int, list))\n self.debug = config['solver'].get('debug', False)\n\n self.last_epoch = -1\n self.last_iter = -1\n # self.ckpt_dir = os.path.join(args.save_dir, 'checkpoint')\n # self.image_dir = os.path.join(args.save_dir, 'images')\n self.ckpt_dir = \"checkpoint\"\n self.image_dir = \"images\"\n os.makedirs(self.ckpt_dir, exist_ok=True)\n os.makedirs(self.image_dir, exist_ok=True)\n\n # get grad_clipper\n if 'clip_grad_norm' in config['solver']:\n self.clip_grad_norm = instantiate_from_config(config['solver']['clip_grad_norm'])\n else:\n self.clip_grad_norm = None\n\n # get lr\n adjust_lr = config['solver'].get('adjust_lr', 'sqrt')\n base_lr = config['solver'].get('base_lr', 1.0e-4)\n if adjust_lr == 'none':\n self.lr = base_lr\n elif adjust_lr == 'sqrt':\n self.lr = base_lr * math.sqrt(args.world_size * config['dataloader']['batch_size'])\n elif adjust_lr == 'linear':\n self.lr = base_lr * args.world_size * config['dataloader']['batch_size']\n else:\n raise NotImplementedError('Unknown type of adjust lr {}!'.format(adjust_lr))\n self.logger.log_info('Get lr {} from base lr {} with {}'.format(self.lr, base_lr, adjust_lr))\n\n if hasattr(model, 'get_optimizer_and_scheduler') and callable(getattr(model, 'get_optimizer_and_scheduler')):\n optimizer_and_scheduler = model.get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])\n else:\n optimizer_and_scheduler = self._get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])\n\n assert type(optimizer_and_scheduler) == type({}), 'optimizer and schduler should be a dict!'\n self.optimizer_and_scheduler = optimizer_and_scheduler\n\n # configre for ema\n if 'ema' in config['solver'] and args.local_rank == 0:\n ema_args = config['solver']['ema']\n ema_args = OmegaConf.to_container(copy.deepcopy(ema_args), resolve=True)\n ema_args['model'] = self.model\n self.ema = EMA(**ema_args)\n else:\n self.ema = None\n\n self.logger.log_info(str(get_model_parameters_info(self.model)))\n self.model.cuda()\n self.device = self.model.device\n if self.args.distributed:\n self.logger.log_info('Distributed, begin DDP the model...')\n self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.gpu], find_unused_parameters=False)\n self.logger.log_info('Distributed, DDP model done!')\n # prepare for amp\n self.args.amp = self.args.amp and AMP\n if self.args.amp:\n self.scaler = GradScaler()\n self.logger.log_info('Using AMP for training!')\n\n self.logger.log_info(\"{}: global rank {}: prepare solver done!\".format(self.args.exp_name,self.args.global_rank), check_primary=False)\n\n self.best_loss = float('inf')\n\n def _get_optimizer_and_scheduler(self, op_sc_list):\n optimizer_and_scheduler = {}\n for op_sc_cfg in op_sc_list:\n op_sc = {\n 'name': op_sc_cfg.get('name', 'none'),\n 
'start_epoch': op_sc_cfg.get('start_epoch', 0),\n 'end_epoch': op_sc_cfg.get('end_epoch', -1),\n 'start_iteration': op_sc_cfg.get('start_iteration', 0),\n 'end_iteration': op_sc_cfg.get('end_iteration', -1),\n }\n\n if op_sc['name'] == 'none':\n # parameters = self.model.parameters()\n parameters = filter(lambda p: p.requires_grad, self.model.parameters())\n else:\n # NOTE: get the parameters with the given name, the parameters() should be overide\n parameters = self.model.parameters(name=op_sc['name'])\n \n # build optimizer\n op_cfg = op_sc_cfg.get('optimizer', {'target': 'torch.optim.SGD', 'params': {}})\n op_cfg = OmegaConf.to_container(copy.deepcopy(op_cfg), resolve=True)\n if 'params' not in op_cfg:\n op_cfg['params'] = {}\n if 'lr' not in op_cfg['params']:\n op_cfg['params']['lr'] = self.lr\n op_cfg['params']['params'] = parameters\n optimizer = instantiate_from_config(op_cfg)\n op_sc['optimizer'] = {\n 'module': optimizer,\n 'step_iteration': op_cfg.get('step_iteration', 1)\n }\n assert isinstance(op_sc['optimizer']['step_iteration'], int), 'optimizer steps should be a integer number of iterations'\n\n # build scheduler\n if 'scheduler' in op_sc_cfg:\n sc_cfg = OmegaConf.to_container(copy.deepcopy(op_sc_cfg['scheduler']), resolve=True)\n sc_cfg['params']['optimizer'] = optimizer\n # for cosine annealing lr, compute T_max\n if sc_cfg['target'].split('.')[-1] in ['CosineAnnealingLRWithWarmup', 'CosineAnnealingLR']:\n T_max = self.max_epochs * self.dataloader['train_iterations']\n sc_cfg['params']['T_max'] = T_max\n scheduler = instantiate_from_config(sc_cfg)\n op_sc['scheduler'] = {\n 'module': scheduler,\n 'step_iteration': sc_cfg.get('step_iteration', 1)\n }\n if op_sc['scheduler']['step_iteration'] == 'epoch':\n op_sc['scheduler']['step_iteration'] = self.dataloader['train_iterations']\n optimizer_and_scheduler[op_sc['name']] = op_sc\n\n return optimizer_and_scheduler\n\n def _get_lr(self, return_type='str'):\n \n lrs = {}\n for op_sc_n, op_sc in self.optimizer_and_scheduler.items():\n lr = op_sc['optimizer']['module'].state_dict()['param_groups'][0]['lr']\n lrs[op_sc_n+'_lr'] = round(lr, 10)\n if return_type == 'str':\n lrs = str(lrs)\n lrs = lrs.replace('none', 'lr').replace('{', '').replace('}','').replace('\\'', '')\n elif return_type == 'dict':\n pass \n else:\n raise ValueError('Unknow of return type: {}'.format(return_type))\n return lrs\n\n def sample(self, batch, phase='train', step_type='iteration'):\n tic = time.time()\n self.logger.log_info('Begin to sample...')\n if self.ema is not None:\n self.ema.modify_to_inference()\n suffix = '_ema'\n else:\n suffix = ''\n \n if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n model = self.model.module\n else: \n model = self.model \n \n with torch.no_grad(): \n if self.debug == False:\n if self.args.amp:\n with autocast():\n samples = model.sample(batch=batch, step=self.last_iter)\n else:\n samples = model.sample(batch=batch, step=self.last_iter)\n else:\n samples = model.sample(batch=batch[0].cuda(), step=self.last_iter)\n\n step = self.last_iter if step_type == 'iteration' else self.last_epoch\n for k, v in samples.items():\n save_dir = os.path.join(self.image_dir, phase, k)\n os.makedirs(save_dir, exist_ok=True)\n save_path = os.path.join(save_dir, 'e{:010d}_itr{:010d}_rank{}{}'.format(self.last_epoch, self.last_iter%self.dataloader['train_iterations'], get_rank(), suffix))\n if torch.is_tensor(v) and v.dim() == 4 and v.shape[1] in [1, 3]: # image\n im = v\n im = im.to(torch.uint8)\n 
self.logger.add_images(tag='{}/{}e_{}itr/{}'.format(phase, self.last_epoch, self.last_iter%self.dataloader['train_iterations'], k), img_tensor=im, global_step=step, dataformats='NCHW')\n\n # save images\n im_grid = torchvision.utils.make_grid(im)\n im_grid = im_grid.permute(1, 2, 0).to('cpu').numpy()\n im_grid = Image.fromarray(im_grid)\n\n im_grid.save(save_path + '.jpg')\n self.logger.log_info('save {} to {}'.format(k, save_path+'.jpg'))\n else: # may be other values, such as text caption\n with open(save_path+'.txt', 'a') as f:\n f.write(str(v)+'\\n')\n f.close()\n self.logger.log_info('save {} to {}'.format(k, save_path+'txt'))\n \n if self.ema is not None:\n self.ema.modify_to_train()\n \n self.logger.log_info('Sample done, time: {:.2f}'.format(time.time() - tic))\n\n def step(self, batch, phase='train'):\n loss = {}\n if self.debug == False: \n for k, v in batch.items():\n if torch.is_tensor(v):\n batch[k] = v.cuda()\n else:\n batch = batch[0].cuda()\n for op_sc_n, op_sc in self.optimizer_and_scheduler.items():\n if phase == 'train':\n # check if this optimizer and scheduler is valid in this iteration and epoch\n if op_sc['start_iteration'] > self.last_iter:\n continue\n if op_sc['end_iteration'] > 0 and op_sc['end_iteration'] <= self.last_iter:\n continue\n if op_sc['start_epoch'] > self.last_epoch:\n continue\n if op_sc['end_epoch'] > 0 and op_sc['end_epoch'] <= self.last_epoch:\n continue\n\n input = {\n 'batch': batch,\n 'return_loss': True,\n 'step': self.last_iter,\n }\n if op_sc_n != 'none':\n input['name'] = op_sc_n\n\n if phase == 'train':\n if self.args.amp:\n with autocast():\n output = self.model(**input)\n else:\n output = self.model(**input)\n else:\n with torch.no_grad():\n if self.args.amp:\n with autocast():\n output = self.model(**input)\n else:\n output = self.model(**input)\n \n if phase == 'train':\n if op_sc['optimizer']['step_iteration'] > 0 and (self.last_iter + 1) % op_sc['optimizer']['step_iteration'] == 0:\n op_sc['optimizer']['module'].zero_grad()\n if self.args.amp:\n self.scaler.scale(output['loss']).backward()\n if self.clip_grad_norm is not None:\n self.clip_grad_norm(self.model.parameters())\n self.scaler.step(op_sc['optimizer']['module'])\n self.scaler.update()\n else:\n output['loss'].backward()\n if self.clip_grad_norm is not None:\n self.clip_grad_norm(self.model.parameters())\n op_sc['optimizer']['module'].step()\n \n if 'scheduler' in op_sc:\n if op_sc['scheduler']['step_iteration'] > 0 and (self.last_iter + 1) % op_sc['scheduler']['step_iteration'] == 0:\n if isinstance(op_sc['scheduler']['module'], STEP_WITH_LOSS_SCHEDULERS):\n op_sc['scheduler']['module'].step(output.get('loss'))\n else:\n op_sc['scheduler']['module'].step()\n # update ema model\n if self.ema is not None:\n self.ema.update(iteration=self.last_iter)\n\n loss[op_sc_n] = {k: v for k, v in output.items() if ('loss' in k or 'acc' in k)}\n return loss\n\n def save(self, force=False, is_best=False):\n if is_primary():\n # save with the epoch specified name\n if self.save_iterations > 0:\n if (self.last_iter + 1) % self.save_iterations == 0:\n save = True \n else:\n save = False\n else:\n if isinstance(self.save_epochs, int):\n save = (self.last_epoch + 1) % self.save_epochs == 0\n else:\n save = (self.last_epoch + 1) in self.save_epochs\n \n if save or force:\n state_dict = {\n 'last_epoch': self.last_epoch,\n 'last_iter': self.last_iter,\n 'model': self.model.module.state_dict() if isinstance(self.model, torch.nn.parallel.DistributedDataParallel) else self.model.state_dict() \n }\n 
if self.ema is not None:\n state_dict['ema'] = self.ema.state_dict()\n if self.clip_grad_norm is not None:\n state_dict['clip_grad_norm'] = self.clip_grad_norm.state_dict()\n\n # add optimizers and schedulers\n optimizer_and_scheduler = {}\n for op_sc_n, op_sc in self.optimizer_and_scheduler.items():\n state_ = {}\n for k in op_sc:\n if k in ['optimizer', 'scheduler']:\n op_or_sc = {kk: vv for kk, vv in op_sc[k].items() if kk != 'module'}\n op_or_sc['module'] = op_sc[k]['module'].state_dict()\n state_[k] = op_or_sc\n else:\n state_[k] = op_sc[k]\n optimizer_and_scheduler[op_sc_n] = state_\n\n state_dict['optimizer_and_scheduler'] = optimizer_and_scheduler\n \n if save:\n save_path = os.path.join(self.ckpt_dir, '{}e_{}iter.pth'.format(str(self.last_epoch).zfill(6), self.last_iter))\n torch.save(state_dict, save_path)\n self.logger.log_info('saved in {}'.format(save_path)) \n \n # save with the last name\n save_path = os.path.join(self.ckpt_dir, 'last.pth')\n torch.save(state_dict, save_path) \n self.logger.log_info('saved in {}'.format(save_path)) \n\n if is_best:\n save_path = os.path.join(self.ckpt_dir, 'best.pth')\n torch.save(state_dict, save_path) \n self.logger.log_info('best checktpoint saved in {} !!!!!!!!!!'.format(save_path)) \n \n def resume(self, \n path=None, # The path of last.pth\n load_optimizer_and_scheduler=True, # whether to load optimizers and scheduler\n load_others=True # load other informations\n ): \n if path is None:\n path = os.path.join(self.ckpt_dir, 'last.pth')\n\n if os.path.exists(path):\n state_dict = torch.load(path, map_location='cuda:{}'.format(self.args.local_rank))\n\n if load_others:\n self.last_epoch = state_dict['last_epoch']\n self.last_iter = state_dict['last_iter']\n \n if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n try:\n self.model.module.load_state_dict(state_dict['model'])\n except:\n model_dict = self.model.module.state_dict()\n temp_state_dict = {k:v for k,v in state_dict['model'].items() if k in model_dict.keys()}\n model_dict.update(temp_state_dict)\n self.model.module.load_state_dict(model_dict)\n else:\n self.model.load_state_dict(state_dict['model'])\n\n if 'ema' in state_dict and self.ema is not None:\n try:\n self.ema.load_state_dict(state_dict['ema'])\n except:\n model_dict = self.ema.state_dict()\n temp_state_dict = {k:v for k,v in state_dict['ema'].items() if k in model_dict.keys()}\n model_dict.update(temp_state_dict)\n self.ema.load_state_dict(model_dict)\n\n if 'clip_grad_norm' in state_dict and self.clip_grad_norm is not None:\n self.clip_grad_norm.load_state_dict(state_dict['clip_grad_norm'])\n\n # handle optimizer and scheduler\n for op_sc_n, op_sc in state_dict['optimizer_and_scheduler'].items():\n for k in op_sc:\n if k in ['optimizer', 'scheduler']:\n for kk in op_sc[k]:\n if kk == 'module' and load_optimizer_and_scheduler:\n self.optimizer_and_scheduler[op_sc_n][k][kk].load_state_dict(op_sc[k][kk])\n elif load_others: # such as step_iteration, ...\n self.optimizer_and_scheduler[op_sc_n][k][kk] = op_sc[k][kk]\n elif load_others: # such as start_epoch, end_epoch, ....\n self.optimizer_and_scheduler[op_sc_n][k] = op_sc[k]\n \n self.logger.log_info('Resume from {}'.format(path))\n \n def train_epoch(self):\n self.model.train()\n self.last_epoch += 1\n\n if self.args.distributed:\n self.dataloader['train_loader'].sampler.set_epoch(self.last_epoch)\n\n epoch_start = time.time()\n itr_start = time.time()\n itr = -1\n for itr, batch in enumerate(self.dataloader['train_loader']):\n if itr == 0:\n 
print(\"time2 is \" + str(time.time()))\n data_time = time.time() - itr_start\n step_start = time.time()\n self.last_iter += 1\n loss = self.step(batch, phase='train')\n # logging info\n if self.logger is not None and self.last_iter % self.args.log_frequency == 0:\n info = '{}: train'.format(self.args.exp_name)\n info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])\n for loss_n, loss_dict in loss.items():\n info += ' ||'\n loss_dict = reduce_dict(loss_dict)\n info += '' if loss_n == 'none' else ' {}'.format(loss_n)\n # info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])\n for k in loss_dict:\n info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))\n self.logger.add_scalar(tag='train/{}/{}'.format(loss_n, k), scalar_value=float(loss_dict[k]), global_step=self.last_iter)\n \n # log lr\n lrs = self._get_lr(return_type='dict')\n for k in lrs.keys():\n lr = lrs[k]\n self.logger.add_scalar(tag='train/{}_lr'.format(k), scalar_value=lrs[k], global_step=self.last_iter)\n\n # add lr to info\n info += ' || {}'.format(self._get_lr())\n \n # add time consumption to info\n spend_time = time.time() - self.start_train_time\n itr_time_avg = spend_time / (self.last_iter + 1)\n info += ' || data_time: {dt}s | fbward_time: {fbt}s | iter_time: {it}s | iter_avg_time: {ita}s | epoch_time: {et} | spend_time: {st} | left_time: {lt}'.format(\n dt=round(data_time, 1),\n it=round(time.time() - itr_start, 1),\n fbt=round(time.time() - step_start, 1),\n ita=round(itr_time_avg, 1),\n et=format_seconds(time.time() - epoch_start),\n st=format_seconds(spend_time),\n lt=format_seconds(itr_time_avg*self.max_epochs*self.dataloader['train_iterations']-spend_time)\n )\n self.logger.log_info(info)\n \n itr_start = time.time()\n\n # modify here to make sure dataloader['train_iterations'] is correct\n assert itr >= 0, \"The data is too less to form one iteration!\"\n self.dataloader['train_iterations'] = itr + 1\n\n def validate_epoch(self):\n if 'validation_loader' not in self.dataloader:\n val = False\n else:\n if isinstance(self.validation_epochs, int):\n val = (self.last_epoch + 1) % self.validation_epochs == 0\n else:\n val = (self.last_epoch + 1) in self.validation_epochs \n\n is_best = False\n if val:\n if self.args.distributed:\n self.dataloader['validation_loader'].sampler.set_epoch(self.last_epoch)\n\n self.model.eval()\n overall_loss = None\n epoch_start = time.time()\n itr_start = time.time()\n itr = -1\n for itr, batch in enumerate(self.dataloader['validation_loader']):\n data_time = time.time() - itr_start\n step_start = time.time()\n loss = self.step(batch, phase='val')\n \n for loss_n, loss_dict in loss.items():\n loss[loss_n] = reduce_dict(loss_dict)\n if overall_loss is None:\n overall_loss = loss\n else:\n for loss_n, loss_dict in loss.items():\n for k, v in loss_dict.items():\n overall_loss[loss_n][k] = (overall_loss[loss_n][k] * itr + loss[loss_n][k]) / (itr + 1)\n \n if self.logger is not None and (itr+1) % self.args.log_frequency == 0:\n info = '{}: val'.format(self.args.exp_name) \n info = info + ': Epoch {}/{} | iter {}/{}'.format(self.last_epoch, self.max_epochs, itr, self.dataloader['validation_iterations'])\n for loss_n, loss_dict in loss.items():\n info += ' ||'\n info += '' if loss_n == 'none' else ' {}'.format(loss_n)\n # info = info + ': Epoch {}/{} | iter 
{}/{}'.format(self.last_epoch, self.max_epochs, itr, self.dataloader['validation_iterations'])\n for k in loss_dict:\n info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))\n \n itr_time_avg = (time.time() - epoch_start) / (itr + 1)\n info += ' || data_time: {dt}s | fbward_time: {fbt}s | iter_time: {it}s | epoch_time: {et} | left_time: {lt}'.format(\n dt=round(data_time, 1),\n fbt=round(time.time() - step_start, 1),\n it=round(time.time() - itr_start, 1),\n et=format_seconds(time.time() - epoch_start),\n lt=format_seconds(itr_time_avg*(self.dataloader['train_iterations']-itr-1))\n )\n \n self.logger.log_info(info)\n itr_start = time.time()\n # modify here to make sure dataloader['validation_iterations'] is correct\n assert itr >= 0, \"The data is too less to form one iteration!\"\n self.dataloader['validation_iterations'] = itr + 1\n\n if self.logger is not None:\n info = '{}: val'.format(self.args.exp_name) \n for loss_n, loss_dict in overall_loss.items():\n info += '' if loss_n == 'none' else ' {}'.format(loss_n)\n info += ': Epoch {}/{}'.format(self.last_epoch, self.max_epochs)\n for k in loss_dict:\n info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))\n self.logger.add_scalar(tag='val/{}/{}'.format(loss_n, k), scalar_value=float(loss_dict[k]), global_step=self.last_epoch)\n self.logger.log_info(info)\n\n print(overall_loss)\n val_loss = sum([loss_dict[k] for k in loss_dict])\n is_best = val_loss < self.best_loss\n self.best_loss = min(self.best_loss, val_loss)\n \n return is_best\n \n def validate(self):\n self.validation_epoch()\n\n def train(self):\n start_epoch = self.last_epoch + 1\n self.start_train_time = time.time()\n self.logger.log_info('{}: global rank {}: start training...'.format(self.args.exp_name, self.args.global_rank), check_primary=False)\n \n for epoch in range(start_epoch, self.max_epochs):\n self.train_epoch()\n is_best = self.validate_epoch()\n self.save(force=True, is_best=is_best)"
},
{
"identifier": "build_model",
"path": "diffusion_reward/models/video_models/vqdiffusion/modeling/build.py",
"snippet": "def build_model(config, args=None):\n return instantiate_from_config(config['model'])"
},
{
"identifier": "load_yaml_config",
"path": "diffusion_reward/models/video_models/vqdiffusion/utils/io.py",
"snippet": "def load_yaml_config(path):\n with open(path) as f:\n config = yaml.full_load(f)\n return config"
},
{
"identifier": "merge_opts_to_config",
"path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py",
"snippet": "def merge_opts_to_config(config, opts):\n def modify_dict(c, nl, v):\n if len(nl) == 1:\n c[nl[0]] = type(c[nl[0]])(v)\n else:\n # print(nl)\n c[nl[0]] = modify_dict(c[nl[0]], nl[1:], v)\n return c\n\n if opts is not None and len(opts) > 0:\n assert len(opts) % 2 == 0, \"each opts should be given by the name and values! The length shall be even number!\"\n for i in range(len(opts) // 2):\n name = opts[2*i]\n value = opts[2*i+1]\n config = modify_dict(config, name.split('.'), value)\n return config "
},
{
"identifier": "modify_config_for_debug",
"path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py",
"snippet": "def modify_config_for_debug(config):\n config['dataloader']['num_workers'] = 0\n config['dataloader']['batch_size'] = 1\n return config"
},
{
"identifier": "seed_everything",
"path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py",
"snippet": "def seed_everything(seed, cudnn_deterministic=False):\n \"\"\"\n Function that sets seed for pseudo-random number generators in:\n pytorch, numpy, python.random\n \n Args:\n seed: the integer value seed for global random state\n \"\"\"\n if seed is not None:\n print(f\"Global seed set to {seed}\")\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if cudnn_deterministic:\n torch.backends.cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')"
}
] | import os
import warnings
import hydra
import torch
from diffusion_reward.models.video_models.vqdiffusion.data.build import \
build_dataloader
from diffusion_reward.models.video_models.vqdiffusion.distributed.launch import launch
from diffusion_reward.models.video_models.vqdiffusion.engine.logger import Logger
from diffusion_reward.models.video_models.vqdiffusion.engine.solver import Solver
from diffusion_reward.models.video_models.vqdiffusion.modeling.build import \
build_model
from diffusion_reward.models.video_models.vqdiffusion.utils.io import load_yaml_config
from diffusion_reward.models.video_models.vqdiffusion.utils.misc import (
merge_opts_to_config, modify_config_for_debug, seed_everything) | 8,663 |
# environment variables
NODE_RANK = os.environ['AZ_BATCHAI_TASK_INDEX'] if 'AZ_BATCHAI_TASK_INDEX' in os.environ else 0
NODE_RANK = int(NODE_RANK)
MASTER_ADDR, MASTER_PORT = os.environ['AZ_BATCH_MASTER_NODE'].split(':') if 'AZ_BATCH_MASTER_NODE' in os.environ else ("127.0.0.1", 29500)
MASTER_PORT = int(MASTER_PORT)
DIST_URL = 'tcp://%s:%s' % (MASTER_ADDR, MASTER_PORT)
@hydra.main(config_path='../diffusion_reward/configs/models/video_models/vqdiffusion', config_name='default')
def main(args):
args.save_dir = os.path.abspath(os.path.dirname(__file__))
args.node_rank = NODE_RANK
args.dist_url = DIST_URL
if args.seed is not None or args.cudnn_deterministic:
seed_everything(args.seed, args.cudnn_deterministic)
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely disable ddp.')
torch.cuda.set_device(args.gpu)
args.ngpus_per_node = 1
args.world_size = 1
else:
if args.num_node == 1:
            args.dist_url = "auto"
else:
assert args.num_node > 1
args.ngpus_per_node = torch.cuda.device_count()
args.world_size = args.ngpus_per_node * args.num_node
launch(main_worker, args.ngpus_per_node, args.num_node, args.node_rank, args.dist_url, args=(args,))
def main_worker(local_rank, args):
args.local_rank = local_rank
args.global_rank = args.local_rank + args.node_rank * args.ngpus_per_node
# load config
config = args
config = merge_opts_to_config(config, args.opts)
if args.debug:
config = modify_config_for_debug(config)
# get logger
logger = Logger(args)
# get model
model = build_model(config, args)
# print(model)
if args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# get dataloader
dataloader_info = build_dataloader(config, args)
# get solver
|
# environment variables
NODE_RANK = os.environ['AZ_BATCHAI_TASK_INDEX'] if 'AZ_BATCHAI_TASK_INDEX' in os.environ else 0
NODE_RANK = int(NODE_RANK)
MASTER_ADDR, MASTER_PORT = os.environ['AZ_BATCH_MASTER_NODE'].split(':') if 'AZ_BATCH_MASTER_NODE' in os.environ else ("127.0.0.1", 29500)
MASTER_PORT = int(MASTER_PORT)
DIST_URL = 'tcp://%s:%s' % (MASTER_ADDR, MASTER_PORT)
@hydra.main(config_path='../diffusion_reward/configs/models/video_models/vqdiffusion', config_name='default')
def main(args):
args.save_dir = os.path.abspath(os.path.dirname(__file__))
args.node_rank = NODE_RANK
args.dist_url = DIST_URL
if args.seed is not None or args.cudnn_deterministic:
seed_everything(args.seed, args.cudnn_deterministic)
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely disable ddp.')
torch.cuda.set_device(args.gpu)
args.ngpus_per_node = 1
args.world_size = 1
else:
if args.num_node == 1:
            args.dist_url = "auto"
else:
assert args.num_node > 1
args.ngpus_per_node = torch.cuda.device_count()
args.world_size = args.ngpus_per_node * args.num_node
launch(main_worker, args.ngpus_per_node, args.num_node, args.node_rank, args.dist_url, args=(args,))
def main_worker(local_rank, args):
args.local_rank = local_rank
args.global_rank = args.local_rank + args.node_rank * args.ngpus_per_node
# load config
config = args
config = merge_opts_to_config(config, args.opts)
if args.debug:
config = modify_config_for_debug(config)
# get logger
logger = Logger(args)
# get model
model = build_model(config, args)
# print(model)
if args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# get dataloader
dataloader_info = build_dataloader(config, args)
# get solver | solver = Solver(config=config, args=args, model=model, dataloader=dataloader_info, logger=logger) | 3 | 2023-12-05 02:42:28+00:00 | 12k |
mkang315/ASF-YOLO | models/yolo.py | [
{
"identifier": "check_anchor_order",
"path": "utils/autoanchor.py",
"snippet": "def check_anchor_order(m):\n # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer\n da = a[-1] - a[0] # delta a\n ds = m.stride[-1] - m.stride[0] # delta s\n if da and (da.sign() != ds.sign()): # same order\n LOGGER.info(f'{PREFIX}Reversing anchor order')\n m.anchors[:] = m.anchors.flip(0)"
},
{
"identifier": "LOGGER",
"path": "utils/general.py",
"snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)"
},
{
"identifier": "check_version",
"path": "utils/general.py",
"snippet": "def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n # Check version vs. required version\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string\n if hard:\n assert result, emojis(s) # assert min requirements met\n if verbose and not result:\n LOGGER.warning(s)\n return result"
},
{
"identifier": "check_yaml",
"path": "utils/general.py",
"snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)"
},
{
"identifier": "make_divisible",
"path": "utils/general.py",
"snippet": "def make_divisible(x, divisor):\n # Returns nearest x divisible by divisor\n if isinstance(divisor, torch.Tensor):\n divisor = int(divisor.max()) # to int\n return math.ceil(x / divisor) * divisor"
},
{
"identifier": "print_args",
"path": "utils/general.py",
"snippet": "def print_args(args: Optional[dict] = None, show_file=True, show_func=False):\n # Print function arguments (optional args dict)\n x = inspect.currentframe().f_back # previous frame\n file, _, func, _, _ = inspect.getframeinfo(x)\n if args is None: # get args automatically\n args, _, _, frm = inspect.getargvalues(x)\n args = {k: v for k, v in frm.items() if k in args}\n try:\n file = Path(file).resolve().relative_to(ROOT).with_suffix('')\n except ValueError:\n file = Path(file).stem\n s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')\n LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))"
},
{
"identifier": "feature_visualization",
"path": "utils/plots.py",
"snippet": "def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\n \"\"\"\n x: Features to be visualized\n module_type: Module type\n stage: Module stage within model\n n: Maximum number of feature maps to plot\n save_dir: Directory to save results\n \"\"\"\n if 'Detect' not in module_type:\n batch, channels, height, width = x.shape # batch, channels, height, width\n if height > 1 and width > 1:\n f = save_dir / f\"stage{stage}_{module_type.split('.')[-1]}_features.png\" # filename\n\n blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels\n n = min(n, channels) # number of plots\n fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols\n ax = ax.ravel()\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n for i in range(n):\n ax[i].imshow(blocks[i].squeeze()) # cmap='gray'\n ax[i].axis('off')\n\n LOGGER.info(f'Saving {f}... ({n}/{channels})')\n plt.savefig(f, dpi=300, bbox_inches='tight')\n plt.close()\n np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save"
},
{
"identifier": "fuse_conv_and_bn",
"path": "utils/torch_utils.py",
"snippet": "def fuse_conv_and_bn(conv, bn):\n # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True).requires_grad_(False).to(conv.weight.device)\n\n # Prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n # Prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv"
},
{
"identifier": "initialize_weights",
"path": "utils/torch_utils.py",
"snippet": "def initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True"
},
{
"identifier": "model_info",
"path": "utils/torch_utils.py",
"snippet": "def model_info(model, verbose=False, imgsz=640):\n # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print(f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPs\n p = next(model.parameters())\n stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride\n im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format\n flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs\n imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float\n fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs\n except Exception:\n fs = ''\n\n name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'\n LOGGER.info(f\"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")"
},
{
"identifier": "profile",
"path": "utils/torch_utils.py",
"snippet": "def profile(input, ops, n=10, device=None):\n \"\"\" YOLOv5 speed/memory/FLOPs profiler\n Usage:\n input = torch.randn(16, 3, 640, 640)\n m1 = lambda x: x * torch.sigmoid(x)\n m2 = nn.SiLU()\n profile(input, [m1, m2], n=100) # profile over 100 iterations\n \"\"\"\n results = []\n if not isinstance(device, torch.device):\n device = select_device(device)\n print(f\"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}\"\n f\"{'input':>24s}{'output':>24s}\")\n\n for x in input if isinstance(input, list) else [input]:\n x = x.to(device)\n x.requires_grad = True\n for m in ops if isinstance(ops, list) else [ops]:\n m = m.to(device) if hasattr(m, 'to') else m # device\n m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m\n tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward\n try:\n flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs\n except Exception:\n flops = 0\n\n try:\n for _ in range(n):\n t[0] = time_sync()\n y = m(x)\n t[1] = time_sync()\n try:\n _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()\n t[2] = time_sync()\n except Exception: # no backward method\n # print(e) # for debug\n t[2] = float('nan')\n tf += (t[1] - t[0]) * 1000 / n # ms per op forward\n tb += (t[2] - t[1]) * 1000 / n # ms per op backward\n mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)\n s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes\n p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters\n print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')\n results.append([p, flops, mem, tf, tb, s_in, s_out])\n except Exception as e:\n print(e)\n results.append(None)\n torch.cuda.empty_cache()\n return results"
},
{
"identifier": "scale_img",
"path": "utils/torch_utils.py",
"snippet": "def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)\n # Scales img(bs,3,y,x) by ratio constrained to gs-multiple\n if ratio == 1.0:\n return img\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean"
},
{
"identifier": "select_device",
"path": "utils/torch_utils.py",
"snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)"
},
{
"identifier": "time_sync",
"path": "utils/torch_utils.py",
"snippet": "def time_sync():\n # PyTorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()"
}
] | import argparse
import contextlib
import os
import platform
import sys
import thop # for FLOPs computation
import yaml # for torch hub
from copy import deepcopy
from pathlib import Path
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
from utils.plots import feature_visualization
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
time_sync) | 7,866 | m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility
class SegmentationModel(DetectionModel):
# YOLOv5 segmentation model
def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
super().__init__(cfg, ch, nc, anchors)
class ClassificationModel(BaseModel):
# YOLOv5 classification model
def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index
super().__init__()
self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
def _from_detection_model(self, model, nc=1000, cutoff=10):
# Create a YOLOv5 classification model from a YOLOv5 detection model
if isinstance(model, DetectMultiBackend):
model = model.model # unwrap DetectMultiBackend
model.model = model.model[:cutoff] # backbone
m = model.model[-1] # last layer
ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module
c = Classify(ch, nc) # Classify()
c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type
model.model[-1] = c # replace
self.model = model.model
self.stride = model.stride
self.save = []
self.nc = nc
def _from_yaml(self, cfg):
# Create a YOLOv5 classification model from a *.yaml file
self.model = None
def parse_model(d, ch): # model_dict, input_channels(3)
# Parse a YOLOv5 model.yaml dictionary
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
if act:
Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU()
LOGGER.info(f"{colorstr('activation:')} {act}") # print
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
with contextlib.suppress(NameError):
args[j] = eval(a) if isinstance(a, str) else a # eval strings
n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in {
Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, DownSample}:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum(ch[x] for x in f)
elif m is ScalSeq:
c2 = args[0]
elif m is Add:
c2 = args[0]
elif m is Zoom_cat:
c2 = 3*args[0]
elif m is attention_model:
c2 = args[0]
# TODO: channel, gw, gd
elif m in {Detect, Segment}:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
if m is Segment:
args[3] = make_divisible(args[3] * gw, 8)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum(x.numel() for x in m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--profile', action='store_true', help='profile model speed')
parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
opt = parser.parse_args()
| # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
YOLO-specific modules
Usage:
$ python models/yolo.py --cfg yolov5s.yaml
"""
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if platform.system() != 'Windows':
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
try:
except ImportError:
thop = None
class Detect(nn.Module):
# YOLOv5 Detect head for detection models
stride = None # strides computed during build
dynamic = False # force grid reconstruction
export = False # export mode
def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
super().__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid
self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid
self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
self.inplace = inplace # use inplace ops (e.g. slice assignment)
def forward(self, x):
z = [] # inference output
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
if isinstance(self, Segment): # (boxes + masks)
xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy
wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh
y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
else: # Detect (boxes only)
xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy
wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh
y = torch.cat((xy, wh, conf), 4)
z.append(y.view(bs, self.na * nx * ny, self.no))
return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
d = self.anchors[i].device
t = self.anchors[i].dtype
shape = 1, self.na, ny, nx, 2 # grid shape
y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility
grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5
anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
return grid, anchor_grid
class Segment(Detect):
# YOLOv5 Segment head for segmentation models
def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
super().__init__(nc, anchors, ch, inplace)
self.nm = nm # number of masks
self.npr = npr # number of protos
self.no = 5 + nc + self.nm # number of outputs per anchor
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
self.proto = Proto(ch[0], self.npr, self.nm) # protos
self.detect = Detect.forward
def forward(self, x):
p = self.proto(x[0])
x = self.detect(self, x)
return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
class BaseModel(nn.Module):
# YOLOv5 base model
def forward(self, x, profile=False, visualize=False):
return self._forward_once(x, profile, visualize) # single-scale inference, train
def _forward_once(self, x, profile=False, visualize=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
return x
def _profile_one_layer(self, m, x, dt):
c = m == self.model[-1] # is final layer, copy input as inplace fix
o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
t = time_sync()
for _ in range(10):
m(x.copy() if c else x)
dt.append((time_sync() - t) * 100)
if m == self.model[0]:
LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
if c:
LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
LOGGER.info('Fusing layers... ')
for m in self.model.modules():
if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.forward_fuse # update forward
self.info()
return self
def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)
def _apply(self, fn):
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
self = super()._apply(fn)
m = self.model[-1] # Detect()
if isinstance(m, (Detect, Segment)):
m.stride = fn(m.stride)
m.grid = list(map(fn, m.grid))
if isinstance(m.anchor_grid, list):
m.anchor_grid = list(map(fn, m.anchor_grid))
return self
class DetectionModel(BaseModel):
# YOLOv5 detection model
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super().__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
self.yaml_file = Path(cfg).name
with open(cfg, encoding='ascii', errors='ignore') as f:
self.yaml = yaml.safe_load(f) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
self.inplace = self.yaml.get('inplace', True)
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, (Detect, Segment)):
s = 256 # 2x min stride
m.inplace = self.inplace
forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward
check_anchor_order(m)
m.anchors /= m.stride.view(-1, 1, 1)
self.stride = m.stride
self._initialize_biases() # only run once
# Init weights, biases
initialize_weights(self)
self.info()
LOGGER.info('')
def forward(self, x, augment=False, profile=False, visualize=False):
if augment:
return self._forward_augment(x) # augmented inference, None
return self._forward_once(x, profile, visualize) # single-scale inference, train
def _forward_augment(self, x):
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self._forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi = self._descale_pred(yi, fi, si, img_size)
y.append(yi)
y = self._clip_augmented(y) # clip augmented tails
return torch.cat(y, 1), None # augmented inference, train
def _descale_pred(self, p, flips, scale, img_size):
# de-scale predictions following augmented inference (inverse operation)
if self.inplace:
p[..., :4] /= scale # de-scale
if flips == 2:
p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
elif flips == 3:
p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
else:
x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
if flips == 2:
y = img_size[0] - y # de-flip ud
elif flips == 3:
x = img_size[1] - x # de-flip lr
p = torch.cat((x, y, wh, p[..., 4:]), -1)
return p
def _clip_augmented(self, y):
# Clip YOLOv5 augmented inference tails
nl = self.model[-1].nl # number of detection layers (P3-P5)
g = sum(4 ** x for x in range(nl)) # grid points
e = 1 # exclude layer count
i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices
y[0] = y[0][:, :-i] # large
i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices
y[-1] = y[-1][:, i:] # small
return y
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility
class SegmentationModel(DetectionModel):
# YOLOv5 segmentation model
def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
super().__init__(cfg, ch, nc, anchors)
class ClassificationModel(BaseModel):
# YOLOv5 classification model
def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index
super().__init__()
self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
def _from_detection_model(self, model, nc=1000, cutoff=10):
# Create a YOLOv5 classification model from a YOLOv5 detection model
if isinstance(model, DetectMultiBackend):
model = model.model # unwrap DetectMultiBackend
model.model = model.model[:cutoff] # backbone
m = model.model[-1] # last layer
ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module
c = Classify(ch, nc) # Classify()
c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type
model.model[-1] = c # replace
self.model = model.model
self.stride = model.stride
self.save = []
self.nc = nc
def _from_yaml(self, cfg):
# Create a YOLOv5 classification model from a *.yaml file
self.model = None
def parse_model(d, ch): # model_dict, input_channels(3)
# Parse a YOLOv5 model.yaml dictionary
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
if act:
Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU()
LOGGER.info(f"{colorstr('activation:')} {act}") # print
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
with contextlib.suppress(NameError):
args[j] = eval(a) if isinstance(a, str) else a # eval strings
n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in {
Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, DownSample}:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum(ch[x] for x in f)
elif m is ScalSeq:
c2 = args[0]
elif m is Add:
c2 = args[0]
elif m is Zoom_cat:
c2 = 3*args[0]
elif m is attention_model:
c2 = args[0]
# TODO: channel, gw, gd
elif m in {Detect, Segment}:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
if m is Segment:
args[3] = make_divisible(args[3] * gw, 8)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum(x.numel() for x in m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--profile', action='store_true', help='profile model speed')
parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
opt = parser.parse_args() | opt.cfg = check_yaml(opt.cfg) # check YAML | 3 | 2023-12-10 14:18:29+00:00 | 12k |
ylacombe/finetune-hf-vits | convert_original_discriminator_checkpoint.py | [
{
"identifier": "VitsFeatureExtractor",
"path": "utils/feature_extraction_vits.py",
"snippet": "class VitsFeatureExtractor(SequenceFeatureExtractor):\n r\"\"\"\n Constructs a Vits feature extractor.\n\n This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains\n most of the main methods. Users should refer to this superclass for more information regarding those methods.\n\n This class extracts `Short Time Fourier Transform` from raw speech using a custom numpy implementation which should\n match pytorch's `torch.stft`.\n\n Args:\n feature_size (`int`, defaults to 80):\n The feature dimension of the extracted features.\n sampling_rate (`int`, defaults to 22050):\n The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).\n hop_length (`int`, defaults to 256):\n Length of the overlaping windows for the STFT used to obtain the Mel Frequency coefficients.\n n_fft (`int`, defaults to 1024):\n Size of the Fourier transform.\n padding_value (`float`, *optional*, defaults to 0.0):\n Padding value used to pad the audio. Should correspond to silences.\n return_attention_mask (`bool`, *optional*, defaults to `False`):\n Whether to return the attention mask.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip>\n\n For Vits finetuning, `attention_mask` should always be passed for batched inference, to avoid subtle bugs.\n\n </Tip>\n\n max_wav_value (`float`, defaults to 32768.0):\n Maximum wav value. Used to normalize the input waveforms if `do_normalize=True` in the forward pass of this\n feature extractor.\n \"\"\"\n\n model_input_names = [\"input_features\"]\n\n def __init__(\n self,\n feature_size=80,\n sampling_rate=22050,\n hop_length=256,\n n_fft=1024,\n padding_value=0.0,\n return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask,\n max_wav_value=32768.0,\n **kwargs,\n ):\n super().__init__(\n feature_size=feature_size,\n sampling_rate=sampling_rate,\n padding_value=padding_value,\n return_attention_mask=return_attention_mask,\n **kwargs,\n )\n self.n_fft = n_fft\n self.hop_length = hop_length\n self.sampling_rate = sampling_rate\n self.mel_filters = mel_filter_bank(\n num_frequency_bins=1 + n_fft // 2,\n num_mel_filters=feature_size,\n min_frequency=0.0,\n max_frequency=sampling_rate // 2,\n sampling_rate=sampling_rate,\n norm=\"slaney\",\n mel_scale=\"slaney\",\n )\n self.max_wav_value = max_wav_value\n\n def _torch_extract_fbank_features(self, waveform: np.array) -> Tuple[torch.Tensor]:\n \"\"\"\n Compute the log-mel spectrogram of the provided audio using the PyTorch STFT implementation.\n \"\"\"\n if len(waveform.shape) == 1:\n waveform = waveform.unsqueeze(0)\n\n waveform = torch.nn.functional.pad(\n waveform,\n (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)),\n mode=\"reflect\",\n )\n\n window = torch.hann_window(self.n_fft).to(waveform.device)\n stft = torch.stft(\n waveform,\n self.n_fft,\n hop_length=self.hop_length,\n win_length=self.n_fft,\n window=window,\n center=False,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n magnitudes = torch.sqrt(stft.pow(2).sum(-1) + 1e-6)\n\n mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32).to(waveform.device)\n mel_spec = mel_filters.T @ magnitudes\n\n log_spec = torch.clamp(mel_spec, min=1e-5).log()\n return magnitudes, log_spec\n\n def __call__(\n self,\n raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],\n truncation: bool = False,\n pad_to_multiple_of: 
Optional[int] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n return_attention_mask: Optional[bool] = True,\n padding: Optional[str] = True,\n max_length: Optional[int] = None,\n sampling_rate: Optional[int] = None,\n do_normalize: Optional[bool] = None,\n **kwargs,\n ) -> BatchFeature:\n \"\"\"\n Main method to featurize and prepare for the model one or several sequence(s).\n\n Args:\n raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):\n The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float\n values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not\n stereo, i.e. single float per timestep.\n truncation (`bool`, *optional*, default to `False`):\n Activates truncation to cut input sequences longer than *max_length* to *max_length*.\n pad_to_multiple_of (`int`, *optional*, defaults to None):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability\n `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*, defaults to `True`):\n Whether to return the attention mask. If left to the default, will return the attention mask according\n to the specific feature_extractor's default.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip>\n\n For Vits finetuning, `attention_mask` should always be passed for batched inference, to avoid subtle\n bugs.\n\n </Tip>\n\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding\n index) among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n sampling_rate (`int`, *optional*):\n The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass\n `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition\n pipeline.\n do_normalize (`bool`, *optional*):\n Whether or not to divide the input waveform by `self.max_wav_value`.\n \"\"\"\n\n if sampling_rate is not None:\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n f\"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a\"\n f\" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input\"\n f\" was sampled with {self.sampling_rate} and not {sampling_rate}.\"\n )\n else:\n logger.warning(\n \"It is strongly recommended to pass the `sampling_rate` argument to this function. 
\"\n \"Failing to do so can result in silent errors that might be hard to debug.\"\n )\n\n is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1\n if is_batched_numpy and len(raw_speech.shape) > 2:\n raise ValueError(f\"Only mono-channel audio is supported for input to {self}\")\n is_batched = is_batched_numpy or (\n isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))\n )\n\n if is_batched:\n raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]\n elif not is_batched and not isinstance(raw_speech, np.ndarray):\n raw_speech = np.asarray(raw_speech, dtype=np.float32)\n elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):\n raw_speech = raw_speech.astype(np.float32)\n\n # always return batch\n if not is_batched:\n raw_speech = [np.asarray([raw_speech]).T]\n\n if self.max_wav_value is not None and do_normalize:\n raw_speech = [\n speech if self.max_wav_value is None else speech / self.max_wav_value for speech in raw_speech\n ]\n\n batched_speech = BatchFeature({\"input_features\": raw_speech})\n\n # convert into correct format for padding\n padded_inputs = self.pad(\n batched_speech,\n padding=padding,\n max_length=max_length,\n truncation=truncation,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask or do_normalize,\n return_tensors=\"pt\",\n )\n\n # make sure list is in array format\n if isinstance(padded_inputs.get(\"input_features\"),list):\n input_features = torch.tensor(padded_inputs.get(\"input_features\")).transpose(1, 2).transpose(0, 1)\n else:\n input_features = padded_inputs.get(\"input_features\").clone().detach().transpose(1, 2).transpose(0, 1)\n\n\n input_features = self._torch_extract_fbank_features(input_features[0])\n\n mel_scaled_input_features = input_features[1]\n input_features = input_features[0]\n\n padded_inputs[\"input_features\"] = input_features\n padded_inputs[\"mel_scaled_input_features\"] = mel_scaled_input_features\n\n if return_attention_mask:\n # rescale from sample (48000) to feature (3000)\n padded_inputs[\"attention_mask\"] = padded_inputs[\"attention_mask\"][:, :: self.hop_length]\n\n if return_tensors is not None:\n padded_inputs = padded_inputs.convert_to_tensors(return_tensors)\n\n return padded_inputs\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"feature_extractor_type\"] = self.__class__.__name__\n if \"mel_filters\" in output:\n del output[\"mel_filters\"]\n return output"
},
{
"identifier": "VitsConfig",
"path": "utils/configuration_vits.py",
"snippet": "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n \"facebook/mms-tts-eng\": \"https://huggingface.co/facebook/mms-tts-eng/resolve/main/config.json\",\n}\nclass VitsConfig(PretrainedConfig):\n def __init__(\n self,\n vocab_size=38,\n hidden_size=192,\n num_hidden_layers=6,\n num_attention_heads=2,\n window_size=4,\n use_bias=True,\n ffn_dim=768,\n layerdrop=0.1,\n ffn_kernel_size=3,\n flow_size=192,\n spectrogram_bins=513,\n hidden_act=\"relu\",\n hidden_dropout=0.1,\n attention_dropout=0.1,\n activation_dropout=0.1,\n initializer_range=0.02,\n layer_norm_eps=1e-5,\n use_stochastic_duration_prediction=True,\n num_speakers=1,\n speaker_embedding_size=0,\n upsample_initial_channel=512,\n upsample_rates=[8, 8, 2, 2],\n upsample_kernel_sizes=[16, 16, 4, 4],\n resblock_kernel_sizes=[3, 7, 11],\n resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],\n leaky_relu_slope=0.1,\n depth_separable_channels=2,\n depth_separable_num_layers=3,\n duration_predictor_flow_bins=10,\n duration_predictor_tail_bound=5.0,\n duration_predictor_kernel_size=3,\n duration_predictor_dropout=0.5,\n duration_predictor_num_flows=4,\n duration_predictor_filter_channels=256,\n prior_encoder_num_flows=4,\n prior_encoder_num_wavenet_layers=4,\n posterior_encoder_num_wavenet_layers=16,\n wavenet_kernel_size=5,\n wavenet_dilation_rate=1,\n wavenet_dropout=0.0,\n speaking_rate=1.0,\n noise_scale=0.667,\n noise_scale_duration=0.8,\n sampling_rate=16_000,\n discriminator_kernel_size=5,\n discriminator_stride=3,\n discriminator_periods=[2, 3, 5, 7, 11],\n discriminator_period_channels=[1, 32, 128, 512, 1024],\n discriminator_scale_channels=[1, 16, 64, 256, 1024],\n segment_size=8192,\n hop_length=256,\n **kwargs,\n ):"
},
{
"identifier": "VitsDiscriminator",
"path": "utils/modeling_vits_training.py",
"snippet": "class VitsDiscriminator(VitsPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if config.discriminator_scale_channels is not None:\n self.discriminators = nn.ModuleList(\n [VitsHifiGanDiscriminatorScaleResidualBlock(config.discriminator_scale_channels, config.leaky_relu_slope)]\n )\n else:\n self.discriminators = nn.ModuleList([])\n \n self.discriminators.extend(\n [\n VitsHifiGanDiscriminatorPeriodResidualBlock(\n config.discriminator_period_channels,\n period,\n config.discriminator_kernel_size,\n config.discriminator_stride,\n config.leaky_relu_slope,\n )\n for period in config.discriminator_periods\n ]\n )\n\n def forward(self, hidden_states):\n fmaps = []\n discriminated_hidden_states_list = []\n\n for discriminator in self.discriminators:\n discriminated_hidden_states, fmap = discriminator(hidden_states)\n fmaps.append(fmap)\n discriminated_hidden_states_list.append(discriminated_hidden_states)\n\n return discriminated_hidden_states_list, fmaps\n\n def apply_weight_norm(self):\n for disc in self.discriminators:\n disc.apply_weight_norm()\n\n def remove_weight_norm(self):\n for disc in self.discriminators:\n disc.remove_weight_norm()"
},
{
"identifier": "VitsModelForPreTraining",
"path": "utils/modeling_vits_training.py",
"snippet": "class VitsModelForPreTraining(VitsPreTrainedModel):\n def __init__(self, config: VitsConfig):\n super().__init__(config)\n self.config = config\n self.text_encoder = VitsTextEncoder(config)\n self.flow = VitsResidualCouplingBlock(config)\n self.decoder = VitsHifiGan(config)\n\n if config.use_stochastic_duration_prediction:\n self.duration_predictor = VitsStochasticDurationPredictor(config)\n else:\n self.duration_predictor = VitsDurationPredictor(config)\n\n if config.num_speakers > 1:\n self.embed_speaker = nn.Embedding(config.num_speakers, config.speaker_embedding_size)\n\n # This is used only for training.\n self.posterior_encoder = VitsPosteriorEncoder(config)\n self.discriminator = VitsDiscriminator(config)\n\n # These parameters control the synthesised speech properties\n self.speaking_rate = config.speaking_rate\n self.noise_scale = config.noise_scale\n self.noise_scale_duration = config.noise_scale_duration\n self.segment_size = self.config.segment_size // self.config.hop_length\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def resize_speaker_embeddings(\n self,\n new_num_speakers: int,\n speaker_embedding_size: Optional[int] = None,\n pad_to_multiple_of: Optional[int] = 2,\n ):\n if pad_to_multiple_of is not None:\n new_num_speakers = ((new_num_speakers + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of\n\n # first, take care of embed_speaker\n if self.config.num_speakers <= 1:\n if speaker_embedding_size is None:\n raise ValueError(\n \"The current model had no previous speaker embedding, but `speaker_embedding_size` is not specified. Pass `speaker_embedding_size` to this method.\"\n )\n # create new embedding layer\n new_embeddings = nn.Embedding(\n new_num_speakers,\n speaker_embedding_size,\n device=self.device,\n )\n # initialize all new embeddings\n self._init_weights(new_embeddings)\n else:\n new_embeddings = self._get_resized_embeddings(self.embed_speaker, new_num_speakers)\n\n self.embed_speaker = new_embeddings\n\n # then take care of sub-models\n self.flow.resize_speaker_embeddings(speaker_embedding_size)\n for flow in self.flow.flows:\n self._init_weights(flow.wavenet.cond_layer)\n\n self.decoder.resize_speaker_embedding(speaker_embedding_size)\n self._init_weights(self.decoder.cond)\n\n self.duration_predictor.resize_speaker_embeddings(speaker_embedding_size)\n self._init_weights(self.duration_predictor.cond)\n\n self.posterior_encoder.resize_speaker_embeddings(speaker_embedding_size)\n self._init_weights(self.posterior_encoder.wavenet.cond_layer)\n\n self.config.num_speakers = new_num_speakers\n self.config.speaker_embedding_size = speaker_embedding_size\n\n def get_input_embeddings(self):\n return self.text_encoder.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.text_encoder.set_input_embeddings(value)\n\n def apply_weight_norm(self):\n self.decoder.apply_weight_norm()\n self.flow.apply_weight_norm()\n self.posterior_encoder.apply_weight_norm()\n\n def remove_weight_norm(self):\n self.decoder.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.posterior_encoder.remove_weight_norm()\n\n def discriminate(self, hidden_states):\n return self.discriminator(hidden_states)\n\n def get_encoder(self):\n return self.text_encoder\n\n def _inference_forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n speaker_embeddings: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: 
Optional[bool] = None,\n return_dict: Optional[bool] = None,\n padding_mask: Optional[torch.Tensor] = None,\n ):\n text_encoder_output = self.text_encoder(\n input_ids=input_ids,\n padding_mask=padding_mask,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state\n hidden_states = hidden_states.transpose(1, 2)\n input_padding_mask = padding_mask.transpose(1, 2)\n prior_means = text_encoder_output[1] if not return_dict else text_encoder_output.prior_means\n prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances\n\n if self.config.use_stochastic_duration_prediction:\n log_duration = self.duration_predictor(\n hidden_states,\n input_padding_mask,\n speaker_embeddings,\n reverse=True,\n noise_scale=self.noise_scale_duration,\n )\n else:\n log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings)\n\n length_scale = 1.0 / self.speaking_rate\n duration = torch.ceil(torch.exp(log_duration) * input_padding_mask * length_scale)\n predicted_lengths = torch.clamp_min(torch.sum(duration, [1, 2]), 1).long()\n\n # Create a padding mask for the output lengths of shape (batch, 1, max_output_length)\n indices = torch.arange(predicted_lengths.max(), dtype=predicted_lengths.dtype, device=predicted_lengths.device)\n output_padding_mask = indices.unsqueeze(0) < predicted_lengths.unsqueeze(1)\n output_padding_mask = output_padding_mask.unsqueeze(1).to(input_padding_mask.dtype)\n\n # Reconstruct an attention tensor of shape (batch, 1, out_length, in_length)\n attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(output_padding_mask, -1)\n batch_size, _, output_length, input_length = attn_mask.shape\n cum_duration = torch.cumsum(duration, -1).view(batch_size * input_length, 1)\n indices = torch.arange(output_length, dtype=duration.dtype, device=duration.device)\n valid_indices = indices.unsqueeze(0) < cum_duration\n valid_indices = valid_indices.to(attn_mask.dtype).view(batch_size, input_length, output_length)\n padded_indices = valid_indices - nn.functional.pad(valid_indices, [0, 0, 1, 0, 0, 0])[:, :-1]\n attn = padded_indices.unsqueeze(1).transpose(2, 3) * attn_mask\n\n # Expand prior distribution\n prior_means = torch.matmul(attn.squeeze(1), prior_means).transpose(1, 2)\n prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances).transpose(1, 2)\n\n prior_latents = prior_means + torch.randn_like(prior_means) * torch.exp(prior_log_variances) * self.noise_scale\n latents = self.flow(prior_latents, output_padding_mask, speaker_embeddings, reverse=True)\n\n spectrogram = latents * output_padding_mask\n waveform = self.decoder(spectrogram, speaker_embeddings)\n waveform = waveform.squeeze(1)\n sequence_lengths = predicted_lengths * np.prod(self.config.upsample_rates)\n\n if not return_dict:\n outputs = (waveform, sequence_lengths, spectrogram) + text_encoder_output[3:]\n return outputs\n\n return VitsModelOutput(\n waveform=waveform,\n sequence_lengths=sequence_lengths,\n spectrogram=spectrogram,\n hidden_states=text_encoder_output.hidden_states,\n attentions=text_encoder_output.attentions,\n )\n\n @add_start_docstrings_to_model_forward(VITS_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=VitsModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = 
None,\n attention_mask: Optional[torch.Tensor] = None,\n speaker_id: Optional[int] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.FloatTensor] = None,\n labels_attention_mask: Optional[torch.Tensor] = None,\n monotonic_alignment_function: Optional[Callable] = None,\n ) -> Union[Tuple[Any], VitsModelOutput]:\n r\"\"\"\n labels (`torch.FloatTensor` of shape `(batch_size, config.spectrogram_bins, sequence_length)`, *optional*):\n Float values of target spectrogram.\n labels_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on labels. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n monotonic_alignment_function (`Callable`, *optional*):\n Monotonic alignment function. Used for training, i.e when `labels` are provided. By default, it will use a\n Pytorch implementation of the monotonic alignment function which is awfully slow. An alternative relying on\n cython is proposed in examples/pytorch/text-to-speech/run_vits_finetuning.py\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import VitsTokenizer, VitsModelForPreTraining, set_seed\n >>> import torch\n\n >>> tokenizer = VitsTokenizer.from_pretrained(\"facebook/mms-tts-eng\")\n >>> model = VitsModelForPreTraining.from_pretrained(\"facebook/mms-tts-eng\")\n\n >>> inputs = tokenizer(text=\"Hello - my dog is cute\", return_tensors=\"pt\")\n\n >>> set_seed(555) # make deterministic\n\n >>> with torch.no_grad():\n ... outputs = model(inputs[\"input_ids\"])\n >>> outputs.waveform.shape\n torch.Size([1, 45824])\n ```\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n monotonic_alignment_function = (\n monotonic_align_max_path if monotonic_alignment_function is None else monotonic_alignment_function\n )\n\n if attention_mask is not None:\n input_padding_mask = attention_mask.unsqueeze(-1).float()\n else:\n input_padding_mask = torch.ones_like(input_ids).unsqueeze(-1).float()\n\n if self.config.num_speakers > 1 and speaker_id is not None:\n if isinstance(speaker_id, int):\n speaker_id = torch.full(size=(1,), fill_value=speaker_id, device=self.device)\n elif isinstance(speaker_id, (list, tuple, np.ndarray)):\n speaker_id = torch.tensor(speaker_id, device=self.device)\n\n if not ((0 <= speaker_id).all() and (speaker_id < self.config.num_speakers).all()).item():\n raise ValueError(f\"Set `speaker_id` in the range 0-{self.config.num_speakers - 1}.\")\n if not (len(speaker_id) == 1 or len(speaker_id == len(input_ids))):\n raise ValueError(\n f\"You passed {len(speaker_id)} `speaker_id` but you should either pass one speaker id or `batch_size` `speaker_id`.\"\n )\n\n speaker_embeddings = self.embed_speaker(speaker_id).unsqueeze(-1)\n else:\n speaker_embeddings = None\n\n # if inference, return inference forward of VitsModel\n if labels is None:\n return self._inference_forward(\n input_ids,\n attention_mask,\n speaker_embeddings,\n output_attentions,\n output_hidden_states,\n return_dict,\n input_padding_mask,\n )\n\n if 
labels_attention_mask is not None:\n labels_padding_mask = labels_attention_mask.unsqueeze(1).float()\n else:\n labels_attention_mask = torch.ones((labels.shape[0], labels.shape[2])).float().to(self.device)\n labels_padding_mask = labels_attention_mask.unsqueeze(1)\n\n text_encoder_output = self.text_encoder(\n input_ids=input_ids,\n padding_mask=input_padding_mask,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state\n hidden_states = hidden_states.transpose(1, 2)\n input_padding_mask = input_padding_mask.transpose(1, 2)\n prior_means = text_encoder_output[1] if not return_dict else text_encoder_output.prior_means\n prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances\n\n latents, posterior_means, posterior_log_variances = self.posterior_encoder(\n labels, labels_padding_mask, speaker_embeddings\n )\n prior_latents = self.flow(latents, labels_padding_mask, speaker_embeddings, reverse=False)\n\n prior_means, prior_log_variances = prior_means.transpose(1, 2), prior_log_variances.transpose(1, 2)\n with torch.no_grad():\n # negative cross-entropy\n\n # [batch_size, d, latent_length]\n prior_variances = torch.exp(-2 * prior_log_variances)\n # [batch_size, 1, latent_length]\n neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - prior_log_variances, [1], keepdim=True)\n # [batch_size, text_length, d] x [batch_size, d, latent_length] = [batch_size, text_length, latent_length]\n neg_cent2 = torch.matmul(-0.5 * (prior_latents**2).transpose(1, 2), prior_variances)\n # [batch_size, text_length, d] x [batch_size, d, latent_length] = [batch_size, text_length, latent_length]\n neg_cent3 = torch.matmul(prior_latents.transpose(1, 2), (prior_means * prior_variances))\n # [batch_size, 1, latent_length]\n neg_cent4 = torch.sum(-0.5 * (prior_means**2) * prior_variances, [1], keepdim=True)\n\n # [batch_size, text_length, latent_length]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n\n attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(labels_padding_mask, -1)\n\n attn = monotonic_alignment_function(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()\n\n durations = attn.sum(2)\n\n if self.config.use_stochastic_duration_prediction:\n log_duration = self.duration_predictor(\n hidden_states, input_padding_mask, speaker_embeddings, durations=durations, reverse=False\n )\n log_duration = log_duration / torch.sum(input_padding_mask)\n else:\n log_duration_padded = torch.log(durations + 1e-6) * input_padding_mask\n log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings)\n log_duration = torch.sum((log_duration - log_duration_padded) ** 2, [1, 2]) / torch.sum(input_padding_mask)\n\n # expand priors\n prior_means = torch.matmul(attn.squeeze(1), prior_means.transpose(1, 2)).transpose(1, 2)\n prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances.transpose(1, 2)).transpose(1, 2)\n\n label_lengths = labels_attention_mask.sum(dim=1)\n latents_slice, ids_slice = rand_slice_segments(latents, label_lengths, segment_size=self.segment_size)\n\n waveform = self.decoder(latents_slice, speaker_embeddings)\n\n if not return_dict:\n outputs = (\n waveform,\n log_duration,\n attn,\n ids_slice,\n input_padding_mask,\n labels_padding_mask,\n latents,\n prior_latents,\n prior_means,\n 
prior_log_variances,\n posterior_means,\n posterior_log_variances,\n )\n return outputs\n\n return VitsTrainingOutput(\n waveform=waveform,\n log_duration=log_duration,\n attn=attn,\n ids_slice=ids_slice,\n input_padding_mask=input_padding_mask,\n labels_padding_mask=labels_padding_mask,\n latents=latents,\n prior_latents=prior_latents,\n prior_means=prior_means,\n prior_log_variances=prior_log_variances,\n posterior_means=posterior_means,\n posterior_log_variances=posterior_log_variances,\n )"
}
] | import argparse
import torch
from transformers.models.vits.modeling_vits import VitsModel
from transformers.models.vits.tokenization_vits import VitsTokenizer
from huggingface_hub import hf_hub_download
from utils.feature_extraction_vits import VitsFeatureExtractor
from utils.configuration_vits import VitsConfig, logging
from utils.modeling_vits_training import VitsDiscriminator, VitsModelForPreTraining | 8,048 | """Convert VITS discriminator checkpoint and add it to an already converted VITS checkpoint."""
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.vits")
MAPPING = {
"conv_post": "final_conv",
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
@torch.no_grad()
def convert_checkpoint(
language_code,
pytorch_dump_folder_path,
checkpoint_path=None,
generator_checkpoint_path=None,
repo_id=None,
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
if language_code is not None:
checkpoint_path = hf_hub_download(repo_id="facebook/mms-tts", subfolder=f"full_models/{language_code}", filename="D_100000.pth")
generator_checkpoint_path = f"facebook/mms-tts-{language_code}"
| """Convert VITS discriminator checkpoint and add it to an already converted VITS checkpoint."""
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.vits")
MAPPING = {
"conv_post": "final_conv",
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
@torch.no_grad()
def convert_checkpoint(
language_code,
pytorch_dump_folder_path,
checkpoint_path=None,
generator_checkpoint_path=None,
repo_id=None,
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
if language_code is not None:
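        # given a language code, fetch the MMS-TTS discriminator weights (D_100000.pth) from the facebook/mms-tts Hub repo
        # and point the generator path at the already-converted facebook/mms-tts-{language_code} checkpoint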
checkpoint_path = hf_hub_download(repo_id="facebook/mms-tts", subfolder=f"full_models/{language_code}", filename="D_100000.pth")
generator_checkpoint_path = f"facebook/mms-tts-{language_code}"
| config = VitsConfig.from_pretrained(generator_checkpoint_path) | 1 | 2023-12-11 17:56:49+00:00 | 12k |
youngskkim/CRN | exps/det/CRN_r18_256x704_128x128_4key.py | [
{
"identifier": "synchronize",
"path": "utils/torch_dist.py",
"snippet": "def synchronize():\n \"\"\"Helper function to synchronize (barrier)\n among all processes when using distributed training\"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n current_world_size = dist.get_world_size()\n if current_world_size == 1:\n return\n dist.barrier()"
},
{
"identifier": "run_cli",
"path": "exps/base_cli.py",
"snippet": "def run_cli(model_class=BEVDepthLightningModel,\n exp_name='base_exp',\n use_ema=False,\n ckpt_path=None):\n parent_parser = ArgumentParser(add_help=False)\n parent_parser = pl.Trainer.add_argparse_args(parent_parser)\n parent_parser.add_argument('-e',\n '--evaluate',\n dest='evaluate',\n action='store_true',\n help='evaluate model on validation set')\n parent_parser.add_argument('-p',\n '--predict',\n dest='predict',\n action='store_true',\n help='predict model on testing set')\n parent_parser.add_argument('-b', '--batch_size_per_device', type=int)\n parent_parser.add_argument('--seed',\n type=int,\n default=0,\n help='seed for initializing training.')\n parent_parser.add_argument('--ckpt_path', type=str)\n parser = BEVDepthLightningModel.add_model_specific_args(parent_parser)\n parser.set_defaults(profiler='simple',\n deterministic=False,\n max_epochs=24,\n strategy='ddp',\n # strategy='ddp_find_unused_parameters_false',\n num_sanity_val_steps=0,\n check_val_every_n_epoch=1,\n gradient_clip_val=5,\n limit_val_batches=0.25,\n log_every_n_steps=50,\n enable_checkpointing=True,\n precision=16,\n default_root_dir=os.path.join('./outputs/', exp_name))\n args = parser.parse_args()\n if args.seed is not None:\n pl.seed_everything(args.seed)\n\n model = model_class(**vars(args))\n if use_ema:\n train_dataloader = model.train_dataloader()\n ema_callback = EMACallback(\n len(train_dataloader.dataset) * args.max_epochs)\n trainer = pl.Trainer.from_argparse_args(args, callbacks=[ema_callback, ModelSummary(max_depth=3)])\n else:\n trainer = pl.Trainer.from_argparse_args(args, callbacks=[ModelSummary(max_depth=3)])\n if args.evaluate:\n trainer.test(model, ckpt_path=args.ckpt_path)\n elif args.predict:\n predict_step_outputs = trainer.predict(model, ckpt_path=args.ckpt_path)\n all_pred_results = list()\n all_img_metas = list()\n for predict_step_output in predict_step_outputs:\n for i in range(len(predict_step_output)):\n all_pred_results.append(predict_step_output[i][:3])\n all_img_metas.append(predict_step_output[i][3])\n synchronize()\n len_dataset = len(model.test_dataloader().dataset)\n all_pred_results = sum(\n map(list, zip(*all_gather_object(all_pred_results))),\n [])[:len_dataset]\n all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),\n [])[:len_dataset]\n model.evaluator._format_bbox(all_pred_results, all_img_metas,\n os.path.dirname(args.ckpt_path))\n else:\n if ckpt_path:\n trainer.fit(model, ckpt_path=ckpt_path)\n else:\n trainer.fit(model)"
},
{
"identifier": "BEVDepthLightningModel",
"path": "exps/base_exp.py",
"snippet": "class BEVDepthLightningModel(LightningModule):\n MODEL_NAMES = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith('__')\n and callable(models.__dict__[name]))\n\n def __init__(self,\n gpus: int = 1,\n data_root='data/nuScenes',\n eval_interval=1,\n batch_size_per_device=8,\n class_names=CLASSES,\n backbone_img_conf=backbone_img_conf,\n head_conf=head_conf,\n ida_aug_conf=ida_aug_conf,\n bda_aug_conf=bda_aug_conf,\n rda_aug_conf=rda_aug_conf,\n default_root_dir='./outputs/',\n **kwargs):\n super().__init__()\n self.save_hyperparameters()\n self.gpus = gpus\n self.optimizer_config = optimizer_config\n self.pretrain_config = pretrain_config\n self.eval_interval = eval_interval\n self.batch_size_per_device = batch_size_per_device\n self.data_root = data_root\n self.class_names = class_names\n self.backbone_img_conf = backbone_img_conf\n self.head_conf = head_conf\n self.ida_aug_conf = ida_aug_conf\n self.bda_aug_conf = bda_aug_conf\n self.rda_aug_conf = rda_aug_conf\n mmcv.mkdir_or_exist(default_root_dir)\n self.default_root_dir = default_root_dir\n self.evaluator = DetNuscEvaluator(class_names=self.class_names,\n output_dir=self.default_root_dir)\n self.model = BaseBEVDepth(self.backbone_img_conf,\n self.head_conf)\n self.mode = 'valid'\n self.img_conf = img_conf\n self.data_use_cbgs = False\n self.load_interval = 1\n self.num_sweeps = 1\n self.sweep_idxes = list()\n self.key_idxes = list()\n self.data_return_depth = True\n self.downsample_factor = self.backbone_img_conf['downsample_factor']\n self.dbound = self.backbone_img_conf['d_bound']\n self.depth_channels = int(\n (self.dbound[1] - self.dbound[0]) / self.dbound[2])\n self.use_fusion = False\n self.train_info_paths = 'data/nuScenes/nuscenes_infos_train.pkl'\n self.val_info_paths = 'data/nuScenes/nuscenes_infos_val.pkl'\n self.predict_info_paths = 'data/nuScenes/nuscenes_infos_test.pkl'\n\n self.return_image = True\n self.return_depth = True\n self.return_radar_pv = False\n\n self.remove_z_axis = True\n\n def forward(self, sweep_imgs, mats, is_train=False, **inputs):\n return self.model(sweep_imgs, mats, is_train=is_train)\n\n def training_step(self, batch):\n if self.global_rank == 0:\n for pg in self.trainer.optimizers[0].param_groups:\n self.log('learning_rate', pg[\"lr\"])\n\n (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch\n if torch.cuda.is_available():\n if self.return_image:\n sweep_imgs = sweep_imgs.cuda()\n for key, value in mats.items():\n mats[key] = value.cuda()\n if self.return_radar_pv:\n pts_pv = pts_pv.cuda()\n gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]\n gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]\n preds, depth_preds = self(sweep_imgs, mats,\n pts_pv=pts_pv,\n is_train=True)\n targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)\n loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)\n\n if len(depth_labels.shape) == 5:\n # only key-frame will calculate depth loss\n depth_labels = depth_labels[:, 0, ...].contiguous()\n loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds)\n self.log('train/detection', loss_detection)\n self.log('train/heatmap', loss_heatmap)\n self.log('train/bbox', loss_bbox)\n self.log('train/depth', loss_depth)\n\n return loss_detection + loss_depth\n\n def get_depth_loss(self, depth_labels, depth_preds, weight=3.):\n depth_labels = self.get_downsampled_gt_depth(depth_labels)\n depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view(\n -1, 
self.depth_channels)\n fg_mask = torch.max(depth_labels, dim=1).values > 0.0\n\n with autocast(enabled=False):\n loss_depth = (F.binary_cross_entropy(\n depth_preds[fg_mask],\n depth_labels[fg_mask],\n reduction='none',\n ).sum() / max(1.0, fg_mask.sum()))\n\n return weight * loss_depth\n\n def get_downsampled_gt_depth(self, gt_depths):\n \"\"\"\n Input:\n gt_depths: [B, N, H, W]\n Output:\n gt_depths: [B*N*h*w, d]\n \"\"\"\n B, N, H, W = gt_depths.shape\n gt_depths = gt_depths.view(\n B * N,\n H // self.downsample_factor,\n self.downsample_factor,\n W // self.downsample_factor,\n self.downsample_factor,\n 1,\n )\n gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous()\n gt_depths = gt_depths.view(\n -1, self.downsample_factor * self.downsample_factor)\n gt_depths_tmp = torch.where(gt_depths == 0.0,\n 1e5 * torch.ones_like(gt_depths),\n gt_depths)\n gt_depths = torch.min(gt_depths_tmp, dim=-1).values\n gt_depths = gt_depths.view(B * N, H // self.downsample_factor,\n W // self.downsample_factor)\n\n gt_depths = (gt_depths -\n (self.dbound[0] - self.dbound[2])) / self.dbound[2]\n gt_depths = torch.where(\n (gt_depths < self.depth_channels + 1) & (gt_depths > 0.),\n gt_depths, torch.zeros_like(gt_depths))\n gt_depths = F.one_hot(gt_depths.long(),\n num_classes=self.depth_channels + 1).view(\n -1, self.depth_channels + 1)[:, 1:]\n return gt_depths.float()\n\n def eval_step(self, batch, batch_idx, prefix: str):\n (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch\n if torch.cuda.is_available():\n if self.return_image:\n sweep_imgs = sweep_imgs.cuda()\n for key, value in mats.items():\n mats[key] = value.cuda()\n if self.return_radar_pv:\n pts_pv = pts_pv.cuda()\n preds = self(sweep_imgs, mats,\n pts_pv=pts_pv,\n is_train=False)\n if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n results = self.model.module.get_bboxes(preds, img_metas)\n else:\n results = self.model.get_bboxes(preds, img_metas)\n for i in range(len(results)):\n results[i][0] = results[i][0].tensor.detach().cpu().numpy()\n results[i][1] = results[i][1].detach().cpu().numpy()\n results[i][2] = results[i][2].detach().cpu().numpy()\n results[i].append(img_metas[i])\n return results\n\n def validation_epoch_end(self, validation_step_outputs):\n detection_losses = list()\n heatmap_losses = list()\n bbox_losses = list()\n depth_losses = list()\n for validation_step_output in validation_step_outputs:\n detection_losses.append(validation_step_output[0])\n heatmap_losses.append(validation_step_output[1])\n bbox_losses.append(validation_step_output[2])\n depth_losses.append(validation_step_output[3])\n synchronize()\n\n self.log('val/detection', torch.mean(torch.stack(detection_losses)), on_epoch=True)\n self.log('val/heatmap', torch.mean(torch.stack(heatmap_losses)), on_epoch=True)\n self.log('val/bbox', torch.mean(torch.stack(bbox_losses)), on_epoch=True)\n self.log('val/depth', torch.mean(torch.stack(depth_losses)), on_epoch=True)\n\n def validation_step(self, batch, batch_idx):\n (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch\n if torch.cuda.is_available():\n if self.return_image:\n sweep_imgs = sweep_imgs.cuda()\n for key, value in mats.items():\n mats[key] = value.cuda()\n if self.return_radar_pv:\n pts_pv = pts_pv.cuda()\n gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]\n gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]\n with torch.no_grad():\n preds, depth_preds = self(sweep_imgs, mats,\n pts_pv=pts_pv,\n is_train=True)\n targets = 
self.model.get_targets(gt_boxes_3d, gt_labels_3d)\n loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)\n\n if len(depth_labels.shape) == 5:\n # only key-frame will calculate depth loss\n depth_labels = depth_labels[:, 0, ...].contiguous()\n loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.)\n return loss_detection, loss_heatmap, loss_bbox, loss_depth\n\n def test_epoch_end(self, test_step_outputs):\n all_pred_results = list()\n all_img_metas = list()\n for test_step_output in test_step_outputs:\n for i in range(len(test_step_output)):\n all_pred_results.append(test_step_output[i][:3])\n all_img_metas.append(test_step_output[i][3])\n synchronize()\n # TODO: Change another way.\n dataset_length = len(self.val_dataloader().dataset)\n all_pred_results = sum(\n map(list, zip(*all_gather_object(all_pred_results))),\n [])[:dataset_length]\n all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),\n [])[:dataset_length]\n if self.global_rank == 0:\n self.evaluator.evaluate(all_pred_results, all_img_metas)\n\n def configure_optimizers(self):\n optimizer = build_optimizer(self.model, self.optimizer_config)\n scheduler = MultiStepLR(optimizer, [19, 23])\n return [[optimizer], [scheduler]]\n\n def train_dataloader(self):\n train_dataset = NuscDatasetRadarDet(\n ida_aug_conf=self.ida_aug_conf,\n bda_aug_conf=self.bda_aug_conf,\n rda_aug_conf=self.rda_aug_conf,\n img_backbone_conf=self.backbone_img_conf,\n classes=self.class_names,\n data_root=self.data_root,\n info_paths=self.train_info_paths,\n is_train=True,\n use_cbgs=self.data_use_cbgs,\n img_conf=self.img_conf,\n load_interval=self.load_interval,\n num_sweeps=self.num_sweeps,\n sweep_idxes=self.sweep_idxes,\n key_idxes=self.key_idxes,\n return_image=self.return_image,\n return_depth=self.return_depth,\n return_radar_pv=self.return_radar_pv,\n remove_z_axis=self.remove_z_axis,\n depth_path='depth_gt',\n radar_pv_path='radar_pv_filter'\n )\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=self.batch_size_per_device,\n num_workers=4,\n drop_last=True,\n shuffle=False,\n collate_fn=partial(collate_fn,\n is_return_image=self.return_image,\n is_return_depth=self.return_depth,\n is_return_radar_pv=self.return_radar_pv),\n sampler=None,\n )\n return train_loader\n\n def val_dataloader(self):\n val_dataset = NuscDatasetRadarDet(\n ida_aug_conf=self.ida_aug_conf,\n bda_aug_conf=self.bda_aug_conf,\n rda_aug_conf=self.rda_aug_conf,\n img_backbone_conf=self.backbone_img_conf,\n classes=self.class_names,\n data_root=self.data_root,\n info_paths=self.val_info_paths,\n is_train=False,\n img_conf=self.img_conf,\n load_interval=self.load_interval,\n num_sweeps=self.num_sweeps,\n sweep_idxes=self.sweep_idxes,\n key_idxes=self.key_idxes,\n return_image=self.return_image,\n return_depth=self.return_depth,\n return_radar_pv=self.return_radar_pv,\n remove_z_axis=self.remove_z_axis,\n radar_pv_path='radar_pv_filter',\n )\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=self.batch_size_per_device,\n num_workers=4,\n shuffle=False,\n collate_fn=partial(collate_fn,\n is_return_image=self.return_image,\n is_return_depth=self.return_depth,\n is_return_radar_pv=self.return_radar_pv),\n sampler=None,\n )\n return val_loader\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n def predict_dataloader(self):\n predict_dataset = NuscDatasetRadarDet(\n ida_aug_conf=self.ida_aug_conf,\n bda_aug_conf=self.bda_aug_conf,\n rda_aug_conf=self.rda_aug_conf,\n 
img_backbone_conf=self.backbone_img_conf,\n classes=self.class_names,\n data_root=self.data_root,\n info_paths=self.val_info_paths,\n is_train=False,\n img_conf=self.img_conf,\n load_interval=self.load_interval,\n num_sweeps=self.num_sweeps,\n sweep_idxes=self.sweep_idxes,\n key_idxes=self.key_idxes,\n return_image=self.return_image,\n return_depth=self.return_depth,\n return_radar_pv=self.return_radar_pv,\n remove_z_axis=self.remove_z_axis,\n radar_pv_path='radar_pv_filter',\n )\n predict_loader = torch.utils.data.DataLoader(\n predict_dataset,\n batch_size=self.batch_size_per_device,\n num_workers=4,\n shuffle=False,\n collate_fn=partial(collate_fn,\n is_return_image=self.return_image,\n is_return_depth=self.return_depth,\n is_return_radar_pv=self.return_radar_pv),\n sampler=None,\n )\n return predict_loader\n\n def test_step(self, batch, batch_idx):\n return self.eval_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.eval_step(batch, batch_idx, 'predict')\n\n @staticmethod\n def add_model_specific_args(parent_parser): # pragma: no-cover\n return parent_parser"
},
{
"identifier": "CameraRadarNetDet",
"path": "models/camera_radar_net_det.py",
"snippet": "class CameraRadarNetDet(BaseBEVDepth):\n \"\"\"Source code of `CRN`, `https://arxiv.org/abs/2304.00670`.\n\n Args:\n backbone_img_conf (dict): Config of image backbone.\n backbone_pts_conf (dict): Config of point backbone.\n fuser_conf (dict): Config of BEV feature fuser.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_img_conf, backbone_pts_conf, fuser_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = RVTLSSFPN(**backbone_img_conf)\n self.backbone_pts = PtsBackbone(**backbone_pts_conf)\n self.fuser = MFAFuser(**fuser_conf)\n self.head = BEVDepthHead(**head_conf)\n\n self.radar_view_transform = backbone_img_conf['radar_view_transform']\n\n # inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'pts': [],\n 'pts_voxelize': [],\n 'pts_backbone': [],\n 'pts_head': [],\n\n 'fusion': [],\n 'fusion_pre': [],\n 'fusion_layer': [],\n 'fusion_post': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n sweep_ptss=None,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n sweep_ptss (Tensor): Input points.\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n ptss_context, ptss_occupancy, _ = self.backbone_pts(sweep_ptss)\n feats, depth, _ = self.backbone_img(sweep_imgs,\n mats_dict,\n ptss_context,\n ptss_occupancy,\n return_depth=True)\n fused, _ = self.fuser(feats)\n preds, _ = self.head(fused)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n ptss_context, ptss_occupancy, self.times = self.backbone_pts(sweep_ptss,\n times=self.times)\n feats, self.times = self.backbone_img(sweep_imgs,\n mats_dict,\n ptss_context,\n ptss_occupancy,\n times=self.times)\n fused, self.times = self.fuser(feats, times=self.times)\n preds, self.times = self.head(fused, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('pts: %.2f' % time_mean['pts'])\n print(' pts_voxelize: %.2f' % time_mean['pts_voxelize'])\n print(' pts_backbone: %.2f' % time_mean['pts_backbone'])\n print(' pts_head: %.2f' % time_mean['pts_head'])\n print('fusion: %.2f' % time_mean['fusion'])\n print(' fusion_pre: %.2f' % time_mean['fusion_pre'])\n print(' fusion_layer: %.2f' % time_mean['fusion_layer'])\n print(' fusion_post: %.2f' % time_mean['fusion_post'])\n print('head: %.2f' % time_mean['head'])\n print(' 
head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['pts'] + time_mean['img'] + time_mean['fusion'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds"
}
] | import torch
from utils.torch_dist import synchronize
from exps.base_cli import run_cli
from exps.base_exp import BEVDepthLightningModel
from models.camera_radar_net_det import CameraRadarNetDet | 8,895 | point_cloud_range=[0, 2.0, 0, 704, 58.0, 2],
max_voxels=(768, 1024)
),
'pts_voxel_encoder': dict(
type='PillarFeatureNet',
in_channels=5,
feat_channels=[32, 64],
with_distance=False,
with_cluster_center=False,
with_voxel_center=True,
voxel_size=[8, 0.4, 2],
point_cloud_range=[0, 2.0, 0, 704, 58.0, 2],
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
legacy=True
),
'pts_middle_encoder': dict(
type='PointPillarsScatter',
in_channels=64,
output_shape=(140, 88)
),
'pts_backbone': dict(
type='SECOND',
in_channels=64,
out_channels=[64, 128, 256],
layer_nums=[2, 3, 3],
layer_strides=[1, 2, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
conv_cfg=dict(type='Conv2d', bias=True, padding_mode='reflect')
),
'pts_neck': dict(
type='SECONDFPN',
in_channels=[64, 128, 256],
out_channels=[64, 64, 64],
upsample_strides=[0.5, 1, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
upsample_cfg=dict(type='deconv', bias=False),
use_conv_for_no_stride=True
),
'out_channels_pts': 80,
}
################################################
self.fuser_conf = {
'img_dims': 80,
'pts_dims': 80,
'embed_dims': 128,
'num_layers': 6,
'num_heads': 4,
'bev_shape': (128, 128),
}
################################################
self.head_conf = {
'bev_backbone_conf': dict(
type='ResNet',
in_channels=128,
depth=18,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=[0, 1, 2],
norm_eval=False,
base_channels=128,
),
'bev_neck_conf': dict(
type='SECONDFPN',
in_channels=[128, 128, 256, 512],
upsample_strides=[1, 2, 4, 8],
out_channels=[64, 64, 64, 64]
),
'tasks': [
dict(num_class=1, class_names=['car']),
dict(num_class=2, class_names=['truck', 'construction_vehicle']),
dict(num_class=2, class_names=['bus', 'trailer']),
dict(num_class=1, class_names=['barrier']),
dict(num_class=2, class_names=['motorcycle', 'bicycle']),
dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
],
'common_heads': dict(
reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
'bbox_coder': dict(
type='CenterPointBBoxCoder',
post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_num=500,
score_threshold=0.01,
out_size_factor=4,
voxel_size=[0.2, 0.2, 8],
pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
code_size=9,
),
'train_cfg': dict(
point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
grid_size=[512, 512, 1],
voxel_size=[0.2, 0.2, 8],
out_size_factor=4,
dense_reg=1,
gaussian_overlap=0.1,
max_objs=500,
min_radius=2,
code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
),
'test_cfg': dict(
post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_per_img=500,
max_pool_nms=False,
min_radius=[4, 12, 10, 1, 0.85, 0.175],
score_threshold=0.01,
out_size_factor=4,
voxel_size=[0.2, 0.2, 8],
nms_type='circle',
pre_max_size=1000,
post_max_size=200,
nms_thr=0.2,
),
'in_channels': 256, # Equal to bev_neck output_channels.
'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'),
'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25),
'gaussian_overlap': 0.1,
'min_radius': 2,
}
################################################
self.key_idxes = [-2, -4, -6]
| """
mAP: 0.4492
mATE: 0.5236
mASE: 0.2857
mAOE: 0.5640
mAVE: 0.2781
mAAE: 0.1792
NDS: 0.5415
Eval time: 185.7s
Per-class results:
Object Class          AP     ATE    ASE    AOE    AVE    AAE
car                   0.702  0.312  0.172  0.146  0.306  0.197
truck                 0.406  0.501  0.221  0.153  0.235  0.207
bus                   0.506  0.542  0.210  0.130  0.404  0.178
trailer               0.227  0.880  0.252  0.600  0.205  0.100
construction_vehicle  0.133  0.819  0.518  1.251  0.111  0.352
pedestrian            0.450  0.558  0.291  0.683  0.368  0.174
motorcycle            0.478  0.413  0.257  0.820  0.425  0.213
bicycle               0.442  0.409  0.268  1.140  0.171  0.012
traffic_cone          0.544  0.414  0.378  nan    nan    nan
barrier               0.604  0.388  0.291  0.153  nan    nan
img: 10.84
img_backbone: 3.62
img_dep: 1.35
img_transform: 5.01
img_pool: 0.54
pts: 8.46
pts_voxelize: 1.87
pts_backbone: 5.27
pts_head: 0.64
fusion: 6.77
fusion_pre: 0.81
fusion_layer: 5.31
fusion_post: 0.07
head: 7.97
head_backbone: 2.14
head_head: 5.83
total: 34.04
FPS: 29.38
| Name | Type | Params
---------------------------------------------------------------------------------------
0 | model | CameraRadarNetDet | 37.2 M
1 | model.backbone_img | RVTLSSFPN | 17.0 M
2 | model.backbone_img.img_backbone | ResNet | 11.2 M
3 | model.backbone_img.img_neck | SECONDFPN | 246 K
4 | model.backbone_img.depth_net | DepthNet | 4.8 M
5 | model.backbone_img.view_aggregation_net | ViewAggregation | 807 K
6 | model.backbone_pts | PtsBackbone | 3.1 M
7 | model.backbone_pts.pts_voxel_layer | Voxelization | 0
8 | model.backbone_pts.pts_voxel_encoder | PillarFeatureNet | 2.3 K
9 | model.backbone_pts.pts_middle_encoder | PointPillarsScatter | 0
10 | model.backbone_pts.pts_backbone | SECOND | 2.7 M
11 | model.backbone_pts.pts_neck | SECONDFPN | 90.5 K
12 | model.backbone_pts.pred_context | Sequential | 173 K
13 | model.backbone_pts.pred_occupancy | Sequential | 166 K
14 | model.fuser | MFAFuser | 1.2 M
15 | model.fuser.norm_img | LayerNorm | 160
16 | model.fuser.norm_pts | LayerNorm | 160
17 | model.fuser.input_proj | Linear | 20.6 K
18 | model.fuser.positional_encoding | LearnedPositionalEncoding | 16.4 K
19 | model.fuser.ffn_layers | ModuleList | 395 K
20 | model.fuser.norm_layers1 | ModuleList | 1.5 K
21 | model.fuser.norm_layers2 | ModuleList | 1.5 K
22 | model.fuser.attn_layers | ModuleList | 198 K
23 | model.fuser.reduce_conv | Sequential | 590 K
24 | model.head | BEVDepthHead | 15.8 M
25 | model.head.loss_cls | GaussianFocalLoss | 0
26 | model.head.loss_bbox | L1Loss | 0
27 | model.head.shared_conv | ConvModule | 147 K
28 | model.head.task_heads | ModuleList | 1.4 M
29 | model.head.trunk | ResNet | 11.9 M
30 | model.head.neck | SECONDFPN | 2.4 M
---------------------------------------------------------------------------------------
"""
class CRNLightningModel(BEVDepthLightningModel):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.return_image = True
self.return_depth = True
self.return_radar_pv = True
################################################
self.optimizer_config = dict(
type='AdamW',
lr=2e-4,
weight_decay=1e-4)
################################################
self.ida_aug_conf = {
'resize_lim': (0.386, 0.55),
'final_dim': (256, 704),
'rot_lim': (0., 0.),
'H': 900,
'W': 1600,
'rand_flip': True,
'bot_pct_lim': (0.0, 0.0),
'cams': [
'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'
],
'Ncams': 6,
}
self.bda_aug_conf = {
'rot_ratio': 1.0,
'rot_lim': (-22.5, 22.5),
'scale_lim': (0.9, 1.1),
'flip_dx_ratio': 0.5,
'flip_dy_ratio': 0.5
}
################################################
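        # image branch: ResNet-18 on 256x704 inputs with 16x downsampling; LSS-style depth-based view transform assisted by radar (radar_view_transform=True)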
self.backbone_img_conf = {
'x_bound': [-51.2, 51.2, 0.8],
'y_bound': [-51.2, 51.2, 0.8],
'z_bound': [-5, 3, 8],
'd_bound': [2.0, 58.0, 0.8],
'final_dim': (256, 704),
'downsample_factor': 16,
'img_backbone_conf': dict(
type='ResNet',
depth=18,
frozen_stages=0,
out_indices=[0, 1, 2, 3],
norm_eval=False,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18'),
),
'img_neck_conf': dict(
type='SECONDFPN',
in_channels=[64, 128, 256, 512],
upsample_strides=[0.25, 0.5, 1, 2],
out_channels=[64, 64, 64, 64],
),
'depth_net_conf':
dict(in_channels=256, mid_channels=256),
'radar_view_transform': True,
'camera_aware': False,
'output_channels': 80,
}
################################################
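        # radar branch: PointPillars-style pillar encoder + SECOND backbone/FPN; point_cloud_range [0, 704] x [2.0, 58.0] mirrors the image width and depth bins, i.e. radar pillars live in perspective-view (column, depth) coordinates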
self.backbone_pts_conf = {
'pts_voxel_layer': dict(
max_num_points=8,
voxel_size=[8, 0.4, 2],
point_cloud_range=[0, 2.0, 0, 704, 58.0, 2],
max_voxels=(768, 1024)
),
'pts_voxel_encoder': dict(
type='PillarFeatureNet',
in_channels=5,
feat_channels=[32, 64],
with_distance=False,
with_cluster_center=False,
with_voxel_center=True,
voxel_size=[8, 0.4, 2],
point_cloud_range=[0, 2.0, 0, 704, 58.0, 2],
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
legacy=True
),
'pts_middle_encoder': dict(
type='PointPillarsScatter',
in_channels=64,
output_shape=(140, 88)
),
'pts_backbone': dict(
type='SECOND',
in_channels=64,
out_channels=[64, 128, 256],
layer_nums=[2, 3, 3],
layer_strides=[1, 2, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
conv_cfg=dict(type='Conv2d', bias=True, padding_mode='reflect')
),
'pts_neck': dict(
type='SECONDFPN',
in_channels=[64, 128, 256],
out_channels=[64, 64, 64],
upsample_strides=[0.5, 1, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
upsample_cfg=dict(type='deconv', bias=False),
use_conv_for_no_stride=True
),
'out_channels_pts': 80,
}
################################################
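        # MFA fuser: attention-based fusion of the 80-ch image and 80-ch radar features into a 128-ch BEV feature map of shape 128x128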
self.fuser_conf = {
'img_dims': 80,
'pts_dims': 80,
'embed_dims': 128,
'num_layers': 6,
'num_heads': 4,
'bev_shape': (128, 128),
}
################################################
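        # CenterPoint-style multi-task head; out_size_factor 4 on the 512x512 grid yields the 128x128 BEV feature map referenced in the experiment name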
self.head_conf = {
'bev_backbone_conf': dict(
type='ResNet',
in_channels=128,
depth=18,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=[0, 1, 2],
norm_eval=False,
base_channels=128,
),
'bev_neck_conf': dict(
type='SECONDFPN',
in_channels=[128, 128, 256, 512],
upsample_strides=[1, 2, 4, 8],
out_channels=[64, 64, 64, 64]
),
'tasks': [
dict(num_class=1, class_names=['car']),
dict(num_class=2, class_names=['truck', 'construction_vehicle']),
dict(num_class=2, class_names=['bus', 'trailer']),
dict(num_class=1, class_names=['barrier']),
dict(num_class=2, class_names=['motorcycle', 'bicycle']),
dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
],
'common_heads': dict(
reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
'bbox_coder': dict(
type='CenterPointBBoxCoder',
post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_num=500,
score_threshold=0.01,
out_size_factor=4,
voxel_size=[0.2, 0.2, 8],
pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
code_size=9,
),
'train_cfg': dict(
point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
grid_size=[512, 512, 1],
voxel_size=[0.2, 0.2, 8],
out_size_factor=4,
dense_reg=1,
gaussian_overlap=0.1,
max_objs=500,
min_radius=2,
code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
),
'test_cfg': dict(
post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_per_img=500,
max_pool_nms=False,
min_radius=[4, 12, 10, 1, 0.85, 0.175],
score_threshold=0.01,
out_size_factor=4,
voxel_size=[0.2, 0.2, 8],
nms_type='circle',
pre_max_size=1000,
post_max_size=200,
nms_thr=0.2,
),
'in_channels': 256, # Equal to bev_neck output_channels.
'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'),
'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25),
'gaussian_overlap': 0.1,
'min_radius': 2,
}
################################################
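        # three past key frames (offsets -2, -4, -6) plus the current frame, i.e. the "4key" in the experiment name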
self.key_idxes = [-2, -4, -6] | self.model = CameraRadarNetDet(self.backbone_img_conf, | 3 | 2023-12-06 14:57:49+00:00 | 12k |
jinxixiang/magic_animate_unofficial | animatediff/magic_animate/unet_controlnet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "animatediff/magic_animate/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, 
encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "animatediff/magic_animate/unet_3d_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n 
encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "animatediff/magic_animate/unet_3d_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "animatediff/magic_animate/unet_3d_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "animatediff/magic_animate/unet_3d_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "animatediff/magic_animate/unet_3d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "animatediff/magic_animate/unet_3d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "animatediff/magic_animate/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
},
{
"identifier": "Resampler",
"path": "animatediff/magic_animate/resampler.py",
"snippet": "class Resampler(nn.Module):\n def __init__(\n self,\n dim=1024,\n depth=8,\n dim_head=64,\n heads=16,\n num_queries=8,\n embedding_dim=768,\n output_dim=1024,\n ff_mult=4,\n max_seq_len: int = 257, # CLIP tokens + CLS token\n apply_pos_emb: bool = False,\n num_latents_mean_pooled: int = 0, # number of latents derived from mean pooled representation of the sequence\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, embedding_dim) if apply_pos_emb else None\n\n self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)\n\n self.proj_in = nn.Linear(embedding_dim, dim)\n\n self.proj_out = nn.Linear(dim, output_dim)\n self.norm_out = nn.LayerNorm(output_dim)\n\n self.to_latents_from_mean_pooled_seq = (\n nn.Sequential(\n nn.LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange(\"b (n d) -> b n d\", n=num_latents_mean_pooled),\n )\n if num_latents_mean_pooled > 0\n else None\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(\n nn.ModuleList(\n [\n PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),\n FeedForward(dim=dim, mult=ff_mult),\n ]\n )\n )\n\n def forward(self, x):\n if self.pos_emb is not None:\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device=device))\n x = x + pos_emb\n\n latents = self.latents.repeat(x.size(0), 1, 1)\n\n x = self.proj_in(x)\n\n if self.to_latents_from_mean_pooled_seq:\n meanpooled_seq = masked_mean(x, dim=1, mask=torch.ones(x.shape[:2], device=x.device, dtype=torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim=-2)\n\n for attn, ff in self.layers:\n latents = attn(x, latents) + latents\n latents = ff(latents) + latents\n\n latents = self.proj_out(latents)\n return self.norm_out(latents)"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from einops import rearrange
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from animatediff.magic_animate.unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from .resampler import Resampler
from diffusers.utils import WEIGHTS_NAME
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint | 9,759 | # up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
bsz = conditioning.shape[0]
conditioning = rearrange(conditioning, 'b c f h w -> (b f) c h w').contiguous()
embedding = self.conv_in(conditioning)
embedding = torch.nn.functional.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = torch.nn.functional.silu(embedding)
embedding = self.conv_out(embedding)
embedding = rearrange(embedding, '(b f) c h w -> b c f h w', b=bsz).contiguous()
return embedding
def zero_module(module):
for p in module.parameters():
nn.init.zeros_(p)
return module
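# A minimal shape-check sketch (hypothetical helper, not called anywhere in this
# file). The channel count of 4 mirrors the dwpose adapter constructed inside
# UNet3DConditionModel below; the batch/frame/spatial sizes are arbitrary.
def _example_dwpose_embedding_shapes():
    embedder = ControlNetConditioningEmbedding(conditioning_embedding_channels=4)
    cond = torch.randn(2, 3, 8, 64, 64)  # (batch, rgb, frames, height, width)
    out = embedder(cond)
    # conv_in keeps the spatial size; the three stride-2 convolutions downsample
    # height/width by a factor of 8, so the expected shape is (2, 4, 8, 8, 8).
    return out.shape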
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module=False,
motion_module_resolutions=(1, 2, 4, 8),
motion_module_mid_block=False,
motion_module_decoder_only=False,
motion_module_type=None,
motion_module_kwargs={},
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
# Addition for image embeddings
use_image_condition=False,
# Additional for dwpose adapter
use_dwpose_adapter=False,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
# dwpose condition
if use_dwpose_adapter:
self.dwpose_adapter = ControlNetConditioningEmbedding(conditioning_embedding_channels=4) # pose guider net
else:
self.dwpose_adapter = None
self.use_image_condition = False
if use_image_condition:
self.use_image_condition = True
self.image_proj_model = Resampler(
dim=cross_attention_dim,
depth=4,
dim_head=64,
heads=12,
num_queries=16,
embedding_dim=1024,
output_dim=cross_attention_dim,
ff_mult=4,
)
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (
not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
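    # A minimal construction sketch (illustrative only; the argument values shown
    # are assumptions, not taken from this file):
    #   unet = UNet3DConditionModel(sample_size=64)
    #   unet = UNet3DConditionModel(sample_size=64, use_image_condition=True)
    # Enabling use_motion_module additionally requires motion_module_type and
    # motion_module_kwargs that the imported get_down_block/get_up_block and the
    # underlying motion-module implementation understand.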
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
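    # A minimal usage sketch (illustrative only), assuming `unet` is an already
    # constructed UNet3DConditionModel:
    #   unet.set_attention_slice("auto")  # halve every sliceable head dimension
    #   unet.set_attention_slice("max")   # one slice at a time, lowest memory use
    #   unet.set_attention_slice(2)       # broadcast one slice size to all layers
    # A list is also accepted, provided its length equals the number of sliceable
    # attention layers collected above.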
def _set_gradient_checkpointing(self, module, value=False): | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 1 | 2023-12-12 00:16:39+00:00 | 12k |
Chat-3D/Chat-3D-v2 | others/process_vil3dref_results.py | [
{
"identifier": "Chat3D",
"path": "models/chat3d.py",
"snippet": "class Chat3D(nn.Module):\n \"\"\"\n VideoChat model.\n \"\"\"\n def __init__(self, config):\n super().__init__()\n llama_model_path = config.get(\"llama_model_path\")\n low_resource = config.get(\"low_resource\", False)\n # prompt\n self.prompt_template = config.get(\"prompt_template\", \"\")\n self.max_txt_len = config.get(\"max_txt_len\", 32)\n self.end_sym = config.get(\"end_sym\", '\\n')\n self.system_path = config.get(\"system_path\", \"\")\n self.begin_signal = \"###\"\n self.role = (\"Human\", \"Assistant\")\n self.pc_start_token, self.pc_end_token = \"<Target>\", \"</Target>\"\n self.scene_start_token, self.scene_end_token = \"<Scene>\", \"</Scene>\"\n self.add_scene_token = config.get(\"add_scene_token\", True)\n self.debug = config.get(\"debug\", False)\n self.obj_norm_scale = config.get(\"obj_norm_scale\", 1)\n self.scene_norm_scale = config.get(\"scene_norm_scale\", 1)\n self.grad_scale = config.get(\"grad_scale\", 1)\n\n mlp_dropout = config.get(\"mlp_dropout\", 0.5)\n self.stage = config.get(\"stage\", 1)\n\n self.low_resource = low_resource\n\n self.input_dim = config.get(\"input_dim\", 512)\n self.attr_dim = config.get(\"attr_dim\", 512)\n self.inter_dim = self.input_dim + self.attr_dim * 2\n\n if not self.debug:\n logger.info('Loading LLAMA')\n self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)\n self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token\n if self.low_resource:\n self.llama_model = LlamaForCausalLM.from_pretrained(\n llama_model_path,\n torch_dtype=torch.float16,\n load_in_8bit=True,\n device_map=\"auto\"\n )\n else:\n self.llama_model = LlamaForCausalLM.from_pretrained(\n llama_model_path,\n torch_dtype=torch.float16,\n )\n logger.info(\"freeze LLAMA\")\n for name, param in self.llama_model.named_parameters():\n param.requires_grad = False\n # if self.stage != 1:\n # for layer_ind in range(30, 32):\n # for param in self.llama_model.model.layers[layer_ind].parameters():\n # param.requires_grad = True\n # param.data = param.data.float()\n self.llama_dim = self.llama_model.config.hidden_size\n logger.info('Loading LLAMA Done')\n else:\n self.llama_model = None\n self.llama_dim = 4096\n\n # self.object_input_proj = nn.Sequential(\n # nn.Linear(self.input_dim, self.input_dim),\n # # nn.ReLU(),\n # # nn.LayerNorm(self.input_dim),\n # )\n self.coord_proj = nn.Sequential(\n nn.Linear(3, self.attr_dim),\n # nn.ReLU(),\n # nn.LayerNorm(self.attr_dim),\n # nn.Dropout(mlp_dropout)\n )\n self.color_proj = nn.Sequential(\n nn.Linear(3, self.attr_dim),\n # nn.ReLU(),\n # nn.LayerNorm(self.attr_dim),\n # nn.Dropout(mlp_dropout)\n )\n # self.color_dropout = nn.Dropout(mlp_dropout)\n # self.pos_proj = nn.Sequential(\n # nn.Linear(6, self.inter_dim),\n # nn.LayerNorm(self.inter_dim)\n # )\n # self.pos_embedding = PositionalEmbedding(dim=self.llama_dim)\n self.pos_proj = nn.Sequential(\n nn.Linear(3, self.llama_dim)\n )\n self.object_proj = nn.Sequential(\n nn.Linear(self.inter_dim, self.llama_dim),\n nn.GELU(),\n nn.Dropout(mlp_dropout),\n nn.LayerNorm(self.llama_dim),\n nn.Linear(self.llama_dim, self.llama_dim)\n )\n self.scene_proj = nn.Sequential(\n nn.Linear(self.llama_dim, self.llama_dim),\n )\n self.encoder_num_layers = int(config.get(\"encoder_num_layers\", 1))\n self.relation_module = CMT(hidden_size=self.llama_dim, num_layers=self.encoder_num_layers)\n # self.cls_head = nn.Sequential(\n # nn.Linear(self.llama_dim, 40)\n # )\n\n if self.stage == 1:\n for p in self.relation_module.parameters():\n p.requires_grad = 
False\n for p in self.scene_proj.parameters():\n p.requires_grad = False\n for p in self.pos_proj.parameters():\n p.requires_grad = False\n # for p in self.pos_embedding.parameters():\n # p.requires_grad = False\n # for p in self.relation_module.parameters():\n # p.requires_grad = False\n # else:\n # for p in self.size_color_proj.parameters():\n # p.requires_grad = False\n # for p in self.scene_proj.parameters():\n # p.requires_grad = False\n # else:\n # for p in self.size_color_proj.parameters():\n # p.requires_grad = False\n # for p in self.scene_proj.parameters():\n # p.requires_grad = False\n\n with open(self.system_path, \"r\") as f:\n self.system = \"\\n\".join([x.strip() for x in f.readlines()])\n\n if not self.debug:\n self.object_norm = torch.norm(self.get_text_emb(\"object\"), p=2)\n self.relation_norm = torch.norm(self.get_text_emb(\"relation\"), p=2)\n self.position_norm = torch.norm(self.get_text_emb(\"position\"), p=2)\n if self.stage != 1:\n self.object_list_embed, self.object_list_ind = self.prepare_object_list()\n self.p_0_embed, self.p_1_embed = self.prepare_system_embed()\n\n # def process_prompt(self, prompt_path, prompt_template):\n # with open(prompt_path, 'r') as f:\n # prompt_candidates = f.read().splitlines()\n # with open(self.system_path, \"r\") as f:\n # system = \"\\n\".join([x.strip() for x in f.readlines()])\n # prompt_list = [system + \" \" + prompt_template.format(p) for p in prompt_candidates]\n # logger.info(f'Load {len(prompt_list)} training prompts')\n # logger.info(f'Prompt: {prompt_list}')\n # return prompt_list\n\n # def prompt_wrap(self, scene_embed, scene_mask, prompts, is_eval=False):\n # batch_size = scene_embed.shape[0]\n # for i, prompt in enumerate(prompts):\n # p_0, p_1 = prompt.split('<REPLACE>')\n # p_0_tokens = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=is_eval).to(scene_embed.device)\n # p_1_tokens = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False).to(scene_embed.device)\n # # p_2_tokens = self.llama_tokenizer(p_2, return_tensors=\"pt\", add_special_tokens=False).to(pc_embed.device)\n # p_0_embeds = self.llama_model.model.embed_tokens(p_0_tokens.input_ids).expand(batch_size, -1, -1)\n # p_1_embeds = self.llama_model.model.embed_tokens(p_1_tokens.input_ids).expand(batch_size, -1, -1)\n # p_2_embeds = self.llama_model.model.embed_tokens(p_2_tokens.input_ids).expand(batch_size, -1, -1)\n # wrapped_embeds = torch.cat([p_0_embeds, pc_embed, p_1_embeds, scene_embed, p_2_embeds], dim=1)\n # wrapped_atts = scene_attn[:, :1].expand(-1, wrapped_embeds.shape[1])\n # return wrapped_embeds, wrapped_atts\n\n # def get_object_list_embed(self, scene_embed, scene_mask):\n # # scene_embed: (obj_num, dim)\n # embed_list = []\n # for i in range(scene_embed.shape[0]):\n # if scene_mask[i] == 0:\n # break\n # text = \"\"\n # if i > 0:\n # text += \", \"\n # text += f\"obj{i:02}: \"\n # text_embeds = self.get_text_emb(text, scene_embed.device).detach()\n # embed_list.extend([text_embeds.squeeze(0), scene_embed[i:i+1]])\n # return torch.cat(embed_list, dim=0)\n\n def prepare_object_list(self, max_obj_num=150):\n tmp_id = 0\n embed_list = []\n obj_index_list = []\n for i in range(max_obj_num):\n text = \"\" if i == 0 else \"; \"\n text += f\"obj{i:02} \"\n text_embeds = self.get_text_emb(text).squeeze(0)\n tmp_id += text_embeds.shape[0]\n obj_index_list.append(tmp_id)\n if self.add_scene_token:\n embed_list.extend([text_embeds, torch.zeros((2, text_embeds.shape[-1]))])\n tmp_id += 2\n else:\n 
embed_list.extend([text_embeds, torch.zeros((1, text_embeds.shape[-1]))])\n tmp_id += 1\n return torch.cat(embed_list, dim=0), obj_index_list\n\n def prepare_system_embed(self):\n prompt = self.system + \" \"\n p_0, p_1 = prompt.split(\"<REPLACE>\")\n p_0_token = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=False)\n p_1_token = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False)\n p_0_embed = self.llama_model.model.embed_tokens(p_0_token.input_ids).squeeze(0)\n p_1_embed = self.llama_model.model.embed_tokens(p_1_token.input_ids).squeeze(0)\n return p_0_embed, p_1_embed\n\n def get_text_emb(self, text, device=\"cpu\"):\n text_tokens = self.llama_tokenizer(text, return_tensors=\"pt\", add_special_tokens=False).to(device)\n return self.llama_model.model.embed_tokens(text_tokens.input_ids)\n\n def encode_object_feat(self, feat, locs, colors):\n # feat = self.object_input_proj(feat)\n size_emb = self.coord_proj(locs[:, :, 3:6])\n gmm_weights = colors[..., :1]\n gmm_means = colors[..., 1:]\n gmm_colors = torch.sum(gmm_weights * gmm_means, dim=2)\n # color_emb = self.color_dropout(torch.sum(self.color_proj(gmm_means) * gmm_weights, dim=2))\n color_emb = self.color_proj(gmm_colors)\n feat = torch.cat([feat, size_emb, color_emb], dim=-1)\n # feat = torch.cat([feat, size_emb], dim=-1)\n # feat = self.scene_proj(feat)\n return feat\n\n @staticmethod\n def get_dist_attention(pos, dist_exp=1):\n # pos (bs, obj_num, 3)\n dist = pos.unsqueeze(1) - pos.unsqueeze(2)\n dist = torch.sum(dist.abs()**dist_exp, dim=-1)\n dist_attn = torch.nn.functional.softmax(-dist, dim=-1)\n return dist_attn\n\n def insert_object_embed(self, embed_1, embed_2, scene_mask, detach_mask=None):\n if detach_mask is not None:\n embed_1_detached = CustomGradLayer.apply(embed_1[detach_mask], self.grad_scale)\n embed_1[detach_mask] = embed_1_detached\n if embed_2 is not None:\n embed_2_detached = CustomGradLayer.apply(embed_2[detach_mask], self.grad_scale)\n embed_2[detach_mask] = embed_2_detached\n obj_num = int(scene_mask.sum())\n mx_ind = self.object_list_ind[obj_num - 1] + (2 if self.add_scene_token else 1)\n object_list_embed = self.object_list_embed[:mx_ind, :].to(embed_1.device)\n object_list_ind = torch.tensor(self.object_list_ind[:obj_num], dtype=torch.long)\\\n .to(embed_1.device)\n object_list_embed[object_list_ind] = embed_1[scene_mask.bool()].to(object_list_embed.dtype)\n if self.add_scene_token:\n object_list_embed[object_list_ind+1] = embed_2[scene_mask.bool()].to(object_list_embed.dtype)\n return object_list_embed\n\n def forward_stage1(self, scene_feat, scene_locs, scene_colors, target_captions, is_eval=False, **kwargs):\n object_embed = self.encode_object_feat(scene_feat, scene_locs, scene_colors)\n proj_object_embed = self.object_proj(object_embed)\n proj_object_embed = proj_object_embed.squeeze(1)\n # cls_output = self.cls_head(proj_object_embed)\n # cls_loss = F.cross_entropy(cls_output, target_clses)\n # cls_acc = (cls_output.max(dim=-1)[1] == target_clses).float().mean()\n norm_object_embed = torch.nn.functional.normalize(proj_object_embed, dim=-1) * self.obj_norm_scale\n target_embeds = []\n for target_caption in target_captions:\n target_tokens = self.llama_tokenizer(\n target_caption,\n return_tensors=\"pt\",\n padding=\"longest\",\n truncation=True,\n max_length=self.max_txt_len,\n add_special_tokens=False\n ).to(norm_object_embed.device)\n token_mask = target_tokens[\"attention_mask\"].unsqueeze(-1)\n target_embed = 
self.llama_model.model.embed_tokens(target_tokens.input_ids) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n target_embed = (target_embed * token_mask).sum(1) / token_mask.sum(1)\n target_embed = target_embed.mean(dim=0)\n target_embeds.append(target_embed)\n target_embeds = torch.stack(target_embeds, dim=0).to(norm_object_embed.device)\n cosine_loss = F.cosine_embedding_loss(norm_object_embed, target_embeds.detach(), torch.tensor([1]).to(norm_object_embed.device))\n l2_loss = F.mse_loss(proj_object_embed, target_embeds.detach())\n # print(torch.norm(pc_embed[:1], p=2), torch.norm(target_embeds[:1], p=2))\n loss = cosine_loss\n return dict(\n loss=loss,\n cosine_loss=cosine_loss,\n # cls_loss=cls_loss,\n l2_loss=l2_loss,\n # cls_acc=cls_acc.detach().cpu(),\n cosine_score=1. - cosine_loss.detach().cpu(),\n obj_norm=proj_object_embed.norm(dim=-1).mean().detach().cpu(),\n target_norm=target_embeds.norm(dim=-1).mean().detach().cpu(),\n l2_dis=l2_loss.detach().cpu()\n )\n\n def forward_stage2(self, scene_feat, scene_locs, scene_colors, scene_mask, detach_mask, obj_ids, questions, answers, is_eval=False, **kwargs):\n object_embed = self.encode_object_feat(scene_feat, scene_locs, scene_colors)\n device = object_embed.device\n batch_size = object_embed.shape[0]\n proj_object_embed = self.object_proj(object_embed)\n norm_object_embed = torch.nn.functional.normalize(proj_object_embed, dim=-1) * self.obj_norm_scale\n # norm_object_embed = proj_object_embed\n proj_scene_embed = None\n if self.add_scene_token: # remember to change the evaluate !!!!!!!!!!!!!!!!!!!!!!(&*&^^#@$%##$%&(*^&%^$%@\n pos_embed = self.pos_proj(scene_locs[:, :, :3])\n scene_embed = proj_object_embed.detach() + 0.1 * pos_embed\n\n # scene_embed = scene_embed.mean(dim=1, keepdim=True).repeat(1, scene_embed.shape[1], 1)\n # proj_scene_embed = scene_embed - proj_object_embed\n\n scene_embed = self.relation_module(scene_embed, scene_locs, scene_mask.bool())\n proj_scene_embed = scene_embed - proj_object_embed.detach()\n\n norm_scene_embed = torch.nn.functional.normalize(proj_scene_embed, dim=-1) * self.scene_norm_scale\n # norm_scene_embed = proj_scene_embed\n input_embed_list, attn_list, target_list = [], [], []\n max_seq_len = 0\n for i, question in enumerate(questions):\n prompt = self.prompt_template.format(question)\n prompt_token = self.llama_tokenizer(prompt, return_tensors=\"pt\", add_special_tokens=False).to(device)\n prompt_embed = self.llama_model.model.embed_tokens(prompt_token.input_ids).detach().squeeze(0)\n # object_list_embed = self.get_object_list_embed(scene_embed[i], scene_mask[i])\n detach_mask = None\n object_list_embed = self.insert_object_embed(norm_object_embed[i], norm_scene_embed[i] if self.add_scene_token else None, scene_mask[i], detach_mask[i] if detach_mask is not None else None)\n # for j in range(obj_num):\n # start_ind = self.object_list_ind[j]\n # assert object_list_embed[start_ind].abs().sum() < 1e-6, (start_ind, object_list_embed[start_ind].sum())\n # assert object_list_embed[start_ind+1].abs().sum() < 1e-6, (start_ind+1, object_list_embed[start_ind+1].sum())\n # object_list_embed[start_ind:start_ind+1, :] = scene_embed[i][j]\n # object_list_embed[start_ind+1:start_ind+2, :] = pos_embed[i][j]\n\n p_0_embed = self.p_0_embed.to(device)\n p_1_embed = self.p_1_embed.to(device)\n\n wrapped_embed = torch.cat([p_0_embed, object_list_embed, p_1_embed, prompt_embed], dim=0)\n wrapped_attn = torch.ones(wrapped_embed.size()[:-1], dtype=torch.long).to(wrapped_embed.device)\n empty_target = (\n 
torch.ones([wrapped_attn.shape[0]+1], dtype=torch.long).to(device).fill_(-100)\n )\n\n answer = answers[i] + self.end_sym\n to_regress_token = self.llama_tokenizer(answer, return_tensors=\"pt\").to(device)\n # breakpoint()\n answer_target = to_regress_token.input_ids.masked_fill(\n to_regress_token.input_ids == self.llama_tokenizer.pad_token_id, -100\n ).squeeze(0)\n to_regress_embed = self.llama_model.model.embed_tokens(to_regress_token.input_ids).squeeze(0)\n\n target = torch.cat([empty_target, answer_target], dim=0)\n bos = torch.ones([1], dtype=to_regress_token.input_ids.dtype, device=to_regress_token.input_ids.device) * self.llama_tokenizer.bos_token_id\n bos_embed = self.llama_model.model.embed_tokens(bos)\n bos_attn = wrapped_attn[:1]\n input_embed = torch.cat([bos_embed, wrapped_embed, to_regress_embed], dim=0)\n attn = torch.cat([bos_attn, wrapped_attn, to_regress_token.attention_mask[0]], dim=0)\n input_embed_list.append(input_embed)\n attn_list.append(attn)\n target_list.append(target)\n max_seq_len = max(max_seq_len, target.shape[0])\n\n dim = norm_object_embed.shape[2]\n\n input_embeds = torch.zeros([batch_size, max_seq_len, dim], dtype=input_embed_list[0].dtype).to(device)\n attention_mask = torch.zeros([batch_size, max_seq_len], dtype=attn_list[0].dtype).to(device)\n targets = torch.zeros([batch_size, max_seq_len], dtype=target_list[0].dtype).to(device).fill_(-100)\n for i in range(len(input_embed_list)):\n input_embed = input_embed_list[i]\n attn = attn_list[i]\n target = target_list[i]\n input_embeds[i, :input_embed.shape[0], :] = input_embed\n attention_mask[i, :attn.shape[0]] = attn\n targets[i, :target.shape[0]] = target\n\n with self.maybe_autocast():\n outputs = self.llama_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n return_dict=True,\n labels=targets,\n )\n\n return dict(\n loss=outputs.loss,\n obj_norm=proj_object_embed.norm(dim=-1).mean().detach().cpu(),\n scene_norm=proj_scene_embed.norm(dim=-1).mean().detach().cpu() if proj_scene_embed is not None else 0.\n )\n\n def forward_stage3(self, scene_feat, scene_attr, scene_mask, target_id, conversations, is_eval=False, **kwargs):\n batch_size, obj_num, _ = scene_feat.shape\n scene_feat = self.encode_and_project(scene_feat, scene_attr)\n pc_embed = torch.gather(scene_feat, 1, target_id.unsqueeze(1).unsqueeze(2).expand(-1, -1, scene_feat.shape[-1]))\n if self.encoder_num_layers > 0:\n scene_feat = self.relation_module(scene_feat, mask=(~scene_mask.bool()).unsqueeze(1).expand(-1, obj_num, -1).unsqueeze(1))\n\n scene_embed = scene_feat * scene_mask.unsqueeze(-1)\n # scene_attn = torch.ones(scene_embed.size()[:-1], dtype=torch.long).to(scene_embed.device)\n max_len = 0\n input_embed_list = []\n p_0_len_list, p_1_len_list = [], []\n target_list = []\n for idx, prompt in enumerate(conversations):\n tmp_scene_embed = scene_embed[idx:idx+1]\n tmp_pc_embed = pc_embed[idx:idx+1]\n p_0, p_ = prompt.split(\"<TargetHere>\")\n p_1, p_2 = p_.split(\"<SceneHere>\")\n p_1 = self.pc_end_token + p_1\n p_0_tokens = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=is_eval).to(tmp_pc_embed.device)\n p_1_tokens = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False).to(tmp_pc_embed.device)\n p_2_tokens = self.llama_tokenizer(p_2, return_tensors=\"pt\", add_special_tokens=False).to(tmp_pc_embed.device)\n p_0_embeds = self.llama_model.model.embed_tokens(p_0_tokens.input_ids)\n p_1_embeds = self.llama_model.model.embed_tokens(p_1_tokens.input_ids)\n p_2_embeds = 
self.llama_model.model.embed_tokens(p_2_tokens.input_ids)\n input_embeds = torch.cat([p_0_embeds, tmp_pc_embed, p_1_embeds, tmp_scene_embed, p_2_embeds], dim=1)\n\n sep1 = self.begin_signal + self.role[0] + \":\"\n sep2 = self.begin_signal + self.role[1] + \":\"\n raw_text = p_2.split(sep2)\n for _idx in range(1, len(raw_text)):\n raw_text[_idx] = sep2 + raw_text[_idx]\n answer_targets = p_2_tokens.input_ids.clone()\n system = raw_text[0].split(sep1)[0]\n system_len = self._get_text_len(system.rstrip())\n sep_len = self._get_text_len(sep1.rstrip())\n cur_len = self._get_text_len(raw_text[0].rstrip())\n answer_targets[:, :system_len] = -100\n answer_targets[:, (system_len+sep_len):cur_len] = -100\n for text in raw_text[1:-1]:\n total_len = self._get_text_len(text.rstrip())\n ans_len = self._get_text_len((text.split(sep1)[0]+sep1).rstrip())\n answer_targets[:, (cur_len+ans_len):(cur_len+total_len)] = -100\n cur_len += total_len\n cur_len += self._get_text_len(raw_text[-1].rstrip())\n if cur_len != answer_targets.shape[1]:\n print(f\"The final length is not equal to the original prompt: {prompt}\")\n assert cur_len == answer_targets.shape[1], (cur_len, answer_targets.shape[1])\n\n max_len = max(max_len, input_embeds.shape[1])\n input_embed_list.append(input_embeds)\n p_0_len_list.append(p_0_tokens.input_ids.shape[1])\n p_1_len_list.append(p_1_tokens.input_ids.shape[1])\n target_list.append(answer_targets)\n\n txt_len = min(max_len + 1, self.max_txt_len + obj_num + 1)\n inputs_embeds = torch.ones([batch_size, txt_len], dtype=torch.long).to(pc_embed.device) * self.llama_tokenizer.pad_token_id\n inputs_embeds = self.llama_model.model.embed_tokens(inputs_embeds)\n attention_mask = torch.zeros([batch_size, txt_len], dtype=torch.long).to(pc_embed.device)\n targets = torch.ones([batch_size, txt_len], dtype=torch.long).to(pc_embed.device).fill_(-100)\n inputs_embeds[:, :1] = self.llama_tokenizer.bos_token_id\n for idx in range(batch_size):\n input_len = min(input_embed_list[idx].shape[1], txt_len - 1)\n inputs_embeds[idx, 1:(input_len+1)] = input_embed_list[idx][:, :input_len]\n attention_mask[idx, :(input_len+1)] = 1\n p_0_len = p_0_len_list[idx]\n p_1_len = p_1_len_list[idx]\n targets[idx, (p_0_len+p_1_len+obj_num+2):(input_len+1)] = target_list[idx][0, :(input_len-p_0_len-p_1_len-obj_num-1)]\n\n outputs = self.llama_model(\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n return_dict=True,\n labels=targets\n )\n\n return dict(\n loss=outputs.loss\n )\n\n def evaluate(self, scene_feat, scene_locs, scene_colors, scene_mask, custom_prompt, is_eval=True, **kwargs):\n object_embed = self.encode_object_feat(scene_feat, scene_locs, scene_colors)\n device = object_embed.device\n batch_size, obj_num = object_embed.shape[:2]\n proj_object_embed = self.object_proj(object_embed)\n norm_object_embed = torch.nn.functional.normalize(proj_object_embed, dim=-1) * self.obj_norm_scale\n # norm_object_embed = proj_object_embed\n if self.add_scene_token:\n pos_embed = self.pos_proj(scene_locs[:, :, :3])\n scene_embed = proj_object_embed + 0.1 * pos_embed\n\n # scene_embed = scene_embed.mean(dim=1, keepdim=True).repeat(1, scene_embed.shape[1], 1)\n # proj_scene_embed = scene_embed - proj_object_embed\n\n scene_embed = self.relation_module(scene_embed, scene_locs, scene_mask.bool())\n proj_scene_embed = scene_embed - proj_object_embed\n\n norm_scene_embed = torch.nn.functional.normalize(proj_scene_embed, dim=-1) * self.scene_norm_scale\n # norm_scene_embed = proj_scene_embed\n\n output_texts = []\n for i 
in range(batch_size):\n # tmp_scene_embed, _ = self.prompt_wrap(pc_embed[idx:idx+1], scene_embed[idx:idx+1], scene_attn[idx:idx+1], custom_prompt[idx], is_eval)\n p_0, p_1 = custom_prompt[i].split(\"<REPLACE>\")\n p_0_token = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=is_eval).to(device)\n p_1_token = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False).to(device)\n p_0_embed = self.llama_model.model.embed_tokens(p_0_token.input_ids)\n p_1_embed = self.llama_model.model.embed_tokens(p_1_token.input_ids)\n\n object_list_embed = self.insert_object_embed(norm_object_embed[i], norm_scene_embed[i] if self.add_scene_token else None, scene_mask[i])\n\n # for j in range(obj_num):\n # start_ind = self.object_list_ind[j]\n # object_list_embed[start_ind:start_ind + 1, :] = scene_embed[i][j]\n # object_list_embed[start_ind + 1:start_ind + 2, :] = pos_embed[i][j]\n object_list_embed = object_list_embed.unsqueeze(0)\n wrapped_embed = torch.cat([p_0_embed, object_list_embed, p_1_embed], dim=1)\n stop_words_ids = [torch.tensor([835]).to(wrapped_embed.device),\n torch.tensor([2277, 29937]).to(wrapped_embed.device)]\n stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])\n with self.maybe_autocast():\n outputs = self.llama_model.generate(\n inputs_embeds=wrapped_embed,\n max_new_tokens=min(self.max_txt_len * 2, 512),\n stopping_criteria=stopping_criteria,\n num_beams=1,\n do_sample=True,\n min_length=1,\n top_p=0.9,\n repetition_penalty=1.0,\n length_penalty=1,\n temperature=1.0,\n )\n output_token = outputs[0]\n if output_token[0] == 0: # the model might output an unknown token <unk> at the beginning. remove it\n output_token = output_token[1:]\n if output_token[0] == 1: # some users find that there is a start token <s> at the beginning. remove it\n output_token = output_token[1:]\n output_text = self.llama_tokenizer.decode(output_token, add_special_tokens=False)\n output_text = output_text.split(self.end_sym)[0]\n output_texts.append(output_text)\n\n return output_texts\n\n def forward(self, **kwargs):\n if \"target_captions\" in kwargs:\n return self.forward_stage1(**kwargs)\n if \"answers\" in kwargs:\n return self.forward_stage2(**kwargs)\n if \"conversations\" in kwargs:\n return self.forward_stage3(**kwargs)\n if \"custom_prompt\" in kwargs:\n return self.evaluate(**kwargs)\n return None\n\n def _get_text_len(self, text):\n return self.llama_tokenizer(text, return_tensors=\"pt\", add_special_tokens=False).input_ids.shape[1]\n\n def maybe_autocast(self, dtype=torch.float16):\n # if on cpu, don't use autocast\n # if on gpu, use autocast with dtype if provided, otherwise use torch.float16\n enable_autocast = self.device != torch.device(\"cpu\")\n\n if enable_autocast:\n return torch.cuda.amp.autocast(dtype=dtype)\n else:\n return contextlib.nullcontext()\n\n @property\n def device(self):\n return list(self.parameters())[0].device"
},
{
"identifier": "setup_main",
"path": "utils/config_utils.py",
"snippet": "def setup_main():\n \"\"\"\n Setup config, logger, output_dir, etc.\n Shared for pretrain and all downstream tasks.\n \"\"\"\n config = setup_config()\n if hasattr(config, \"evaluate\") and config.evaluate:\n config = setup_evaluate_config(config)\n init_distributed_mode(config)\n\n if is_main_process():\n setup_output_dir(config.output_dir, excludes=[\"code\"])\n setup_logger(output=config.output_dir, color=True, name=\"vindlu\")\n logger.info(f\"config: {Config.pretty_text(config)}\")\n Config.dump(config, os.path.join(config.output_dir, \"config.json\"))\n return config"
},
{
"identifier": "setup_model",
"path": "tasks/shared_utils.py",
"snippet": "def setup_model(\n config, model_cls, find_unused_parameters=False\n):\n logger.info(\"Creating model\")\n config = copy.deepcopy(config)\n\n model = model_cls(config=config.model)\n\n model = model.to(torch.device(config.device))\n model_without_ddp = model\n if config.distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[config.gpu],\n find_unused_parameters=find_unused_parameters, # `False` for image-only task\n )\n\n optimizer = create_optimizer(config.optimizer, model)\n scheduler = create_scheduler(config.scheduler, optimizer)\n scaler = torch.cuda.amp.GradScaler(enabled=config.fp16, growth_interval=100)\n\n start_epoch = 0\n global_step = 0\n\n # auto resume the latest checkpoint\n if config.get(\"auto_resume\", False):\n logger.info(\"Auto resuming\")\n model_latest = join(config.output_dir, \"ckpt_latest.pth\")\n model_best = join(config.output_dir, \"ckpt_best.pth\")\n large_num = -1\n for p in os.listdir(config.output_dir):\n if 'ckpt' in p:\n num = p.split('_')[1].split('.')[0]\n if str.isnumeric(num):\n if int(num) > large_num:\n large_num = int(num)\n if large_num != -1:\n model_latest = join(config.output_dir, f\"ckpt_{large_num:02d}.pth\")\n if osp.isfile(model_latest) and not config.pretrained_path:\n config.pretrained_path = model_latest\n config.resume = True\n elif osp.isfile(model_best) and not config.pretrained_path:\n config.pretrained_path = model_best\n config.resume = True\n else:\n logger.info(f\"Not found checkpoint in {config.output_dir}\")\n\n if osp.isfile(config.pretrained_path):\n checkpoint = torch.load(config.pretrained_path, map_location=\"cpu\")\n state_dict = checkpoint[\"model\"]\n\n if config.resume:\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n scheduler.load_state_dict(checkpoint[\"scheduler\"])\n scaler.load_state_dict(checkpoint[\"scaler\"])\n start_epoch = checkpoint[\"epoch\"] + 1\n global_step = checkpoint[\"global_step\"]\n\n # for k in list(state_dict.keys()):\n # if \"relation_module\" in k:\n # del state_dict[k]\n\n msg = model_without_ddp.load_state_dict(state_dict, strict=False)\n # object_proj_dict = {}\n # for k in state_dict.keys():\n # if \"object_proj\" in k:\n # object_proj_dict[k.split(\"object_proj.\")[1]] = state_dict[k]\n # model_without_ddp.scene_proj.load_state_dict(object_proj_dict, strict=False)\n logger.info(msg)\n logger.info(f\"Loaded checkpoint from {config.pretrained_path}\")\n else:\n logger.warning(\"No pretrained checkpoint provided, training from scratch\")\n\n return (\n model,\n model_without_ddp,\n optimizer,\n scheduler,\n scaler,\n start_epoch,\n global_step,\n )"
},
{
"identifier": "setup_seed",
"path": "utils/basic_utils.py",
"snippet": "def setup_seed(seed):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)"
},
{
"identifier": "get_rank",
"path": "utils/distributed.py",
"snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()"
},
{
"identifier": "process_batch_data",
"path": "dataset/base_dataset.py",
"snippet": "def process_batch_data(scene_feats, scene_locs, scene_colors):\n max_obj_num = max([e.shape[0] for e in scene_feats])\n # max_obj_num = 110\n batch_size = len(scene_feats)\n batch_scene_feat = torch.zeros(batch_size, max_obj_num, scene_feats[0].shape[-1])\n batch_scene_locs = torch.zeros(batch_size, max_obj_num, scene_locs[0].shape[-1])\n batch_scene_colors = torch.zeros(batch_size, max_obj_num, scene_colors[0].shape[-2], scene_colors[0].shape[-1])\n batch_scene_mask = torch.zeros(batch_size, max_obj_num, dtype=torch.long)\n for i in range(batch_size):\n batch_scene_feat[i][:scene_feats[i].shape[0]] = scene_feats[i]\n batch_scene_locs[i][:scene_locs[i].shape[0]] = scene_locs[i]\n batch_scene_colors[i][:scene_colors[i].shape[0]] = scene_colors[i]\n batch_scene_mask[i][:scene_feats[i].shape[0]] = 1\n return batch_scene_feat, batch_scene_locs, batch_scene_colors, batch_scene_mask"
}
] | import json
import jsonlines
import math
import torch
import sys
import torch
from models.chat3d import Chat3D
from utils.config_utils import setup_main
from tasks.shared_utils import setup_model
from utils.basic_utils import setup_seed
from utils.distributed import get_rank
from dataset.base_dataset import process_batch_data
from tqdm import tqdm | 9,540 | """
loss/og3d: 2.9594, loss/obj3d_clf: 3.3753, loss/obj3d_clf_pre: 2.0714, loss/txt_clf: 0.6708, loss/total: 10.2789, loss/cross_attn_0: 0.0032, loss/cross_attn_1: 0.0011, loss/cross_attn_2: 0.0011, loss/cross_attn_3: 0.0012, loss/self_attn_0: 0.1595, loss/self_attn_1: 0.0425, loss/self_attn_2: 0.0541, loss/self_attn_3: 0.1030, loss/hidden_state_0: 0.3919, loss/hidden_state_1: 0.0765, loss/hidden_state_2: 0.1033, loss/hidden_state_3: 0.1308, loss/hidden_state_4: 0.1337, acc/og3d: 0.6373, acc/og3d_class: 0.8903, acc/obj3d_clf: 0.6828, acc/obj3d_clf_pre: 0.6131, acc/txt_clf: 0.9281
"""
val_file = "/root/scene-LLaMA/datasets/exprs_neurips22/gtlabelpcd_mix/nr3d/preds/val_outs.json"
nr3d_anno_file = "/root/scene-LLaMA/datasets/referit3d/annotations/bert_tokenized/nr3d.jsonl"
anno_root = "annotations" # annotation dir
attribute_file = f"{anno_root}/scannet_attributes_old.json"
attributes = json.load(open(attribute_file, 'r'))
val_results = json.load(open(val_file))
nr3d_anno = {}
with jsonlines.open(nr3d_anno_file, "r") as reader:
for l in reader:
nr3d_anno[l["item_id"]] = l
item_list = []
acc = 0
for k, v in val_results.items():
obj_ids = v["obj_ids"]
obj_logits = v["obj_logits"]
obj_logits = (torch.tensor(obj_logits)).softmax(dim=-1).tolist()
scan_id = nr3d_anno[k]["scan_id"]
utter = nr3d_anno[k]["utterance"]
target_id = nr3d_anno[k]["target_id"]
obj_num = len(attributes[scan_id]["locs"])
assert target_id < obj_num, f"{obj_num}, {target_id}, {scan_id}"
logit_ids = zip(obj_logits, obj_ids)
logit_ids = sorted(logit_ids, reverse=True)
logits, ids = zip(*logit_ids)
# logits = (torch.tensor(logits[:5]) / 5.).softmax(dim=-1).tolist()
print(logits)
if ids[0] == target_id:
acc += 1
item_list.append({
"can_ids": ids[:5],
"can_preds": logits[:5],
"utter": utter,
"target_id": target_id,
"scan_id": scan_id
})
# print(target_id)
# print(ids[:5])
# print(logits[:5])
# exit()
print("Acc:", float(acc) / len(item_list))
# print(item_list[:5])
# exit()
sys.path.append(".")
config = setup_main()
setup_seed(config.seed + get_rank())
device = torch.device(config.device)
num_steps_per_epoch = 7000
config.scheduler.num_training_steps = num_steps_per_epoch * config.scheduler.epochs
config.scheduler.num_warmup_steps = num_steps_per_epoch * config.scheduler.warmup_epochs
model_cls = eval(config.model.get('model_cls', 'Chat3D'))
(
model,
_,
optimizer,
scheduler,
scaler,
start_epoch,
global_step,
| """
loss/og3d: 2.9594, loss/obj3d_clf: 3.3753, loss/obj3d_clf_pre: 2.0714, loss/txt_clf: 0.6708, loss/total: 10.2789, loss/cross_attn_0: 0.0032, loss/cross_attn_1: 0.0011, loss/cross_attn_2: 0.0011, loss/cross_attn_3: 0.0012, loss/self_attn_0: 0.1595, loss/self_attn_1: 0.0425, loss/self_attn_2: 0.0541, loss/self_attn_3: 0.1030, loss/hidden_state_0: 0.3919, loss/hidden_state_1: 0.0765, loss/hidden_state_2: 0.1033, loss/hidden_state_3: 0.1308, loss/hidden_state_4: 0.1337, acc/og3d: 0.6373, acc/og3d_class: 0.8903, acc/obj3d_clf: 0.6828, acc/obj3d_clf_pre: 0.6131, acc/txt_clf: 0.9281
"""
val_file = "/root/scene-LLaMA/datasets/exprs_neurips22/gtlabelpcd_mix/nr3d/preds/val_outs.json"
nr3d_anno_file = "/root/scene-LLaMA/datasets/referit3d/annotations/bert_tokenized/nr3d.jsonl"
anno_root = "annotations" # annotation dir
attribute_file = f"{anno_root}/scannet_attributes_old.json"
attributes = json.load(open(attribute_file, 'r'))
val_results = json.load(open(val_file))
nr3d_anno = {}
with jsonlines.open(nr3d_anno_file, "r") as reader:
for l in reader:
nr3d_anno[l["item_id"]] = l
item_list = []
acc = 0
for k, v in val_results.items():
obj_ids = v["obj_ids"]
obj_logits = v["obj_logits"]
obj_logits = (torch.tensor(obj_logits)).softmax(dim=-1).tolist()
scan_id = nr3d_anno[k]["scan_id"]
utter = nr3d_anno[k]["utterance"]
target_id = nr3d_anno[k]["target_id"]
obj_num = len(attributes[scan_id]["locs"])
assert target_id < obj_num, f"{obj_num}, {target_id}, {scan_id}"
logit_ids = zip(obj_logits, obj_ids)
logit_ids = sorted(logit_ids, reverse=True)
logits, ids = zip(*logit_ids)
# logits = (torch.tensor(logits[:5]) / 5.).softmax(dim=-1).tolist()
print(logits)
if ids[0] == target_id:
acc += 1
item_list.append({
"can_ids": ids[:5],
"can_preds": logits[:5],
"utter": utter,
"target_id": target_id,
"scan_id": scan_id
})
# print(target_id)
# print(ids[:5])
# print(logits[:5])
# exit()
print("Acc:", float(acc) / len(item_list))
# print(item_list[:5])
# exit()
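# A small hypothetical helper (a sketch, not part of the original script): the
# accuracy printed above is top-1 over the sorted candidates; a top-k variant can
# be derived from the saved item_list entries.
def topk_accuracy(items, k=5):
    hits = sum(1 for it in items if it["target_id"] in it["can_ids"][:k])
    return hits / max(len(items), 1)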
sys.path.append(".")
config = setup_main()
setup_seed(config.seed + get_rank())
device = torch.device(config.device)
num_steps_per_epoch = 7000
config.scheduler.num_training_steps = num_steps_per_epoch * config.scheduler.epochs
config.scheduler.num_warmup_steps = num_steps_per_epoch * config.scheduler.warmup_epochs
model_cls = eval(config.model.get('model_cls', 'Chat3D'))
(
model,
_,
optimizer,
scheduler,
scaler,
start_epoch,
global_step, | ) = setup_model( | 2 | 2023-12-11 14:39:58+00:00 | 12k |
SqueezeBits/owlite | owlite/owlite.py | [
{
"identifier": "OWLITE_DEVICE_NAME",
"path": "owlite_core/cli/device.py",
"snippet": "OWLITE_DEVICE_NAME = CONNECTED_DEVICE[\"device\"] if CONNECTED_DEVICE else None"
},
{
"identifier": "OWLITE_FRONT_BASE_URL",
"path": "owlite_core/constants.py",
"snippet": "OWLITE_FRONT_BASE_URL = \"https://owlite.ai\""
},
{
"identifier": "OWLITE_REPO_PATH",
"path": "owlite_core/constants.py",
"snippet": "OWLITE_REPO_PATH = os.path.join(os.getenv(\"OWLITE_REPO_DIR\", os.path.join(os.getcwd(), \"owlite\")))"
},
{
"identifier": "OWLITE_REPORT_URL",
"path": "owlite_core/constants.py",
"snippet": "OWLITE_REPORT_URL = \"https://tally.so/r/mOl5Zk\""
},
{
"identifier": "OWLITE_SETTINGS",
"path": "owlite_core/owlite_settings.py",
"snippet": "OWLITE_SETTINGS = OwLiteSettings()"
},
{
"identifier": "download_trt_engine",
"path": "owlite/api/device/devices.py",
"snippet": "def download_trt_engine(benchmark_key: str, path_to_save: str) -> None:\n \"\"\"Downloads built TensorRT engine.\n\n Args:\n benchmark_key (str): A key to identify benchmark job.\n path_to_save (str): The path to save downloaded TensorRT engine.\n\n Raises:\n RuntimeError: When device is not set.\n HTTPError: When request was not successful.\n \"\"\"\n device_name = OWLITE_DEVICE_NAME\n if device_name is None:\n log.error(\"Device is not set. Please set device and try again\")\n raise RuntimeError(\"Device not found\")\n\n payload = {\n \"device_name\": device_name,\n \"benchmark_key\": benchmark_key,\n }\n resp = DEVICE_API_BASE.post(\"/devices/trt\", json=payload)\n assert isinstance(resp, dict)\n\n file_url = resp[\"trt_engine_url\"]\n\n download_file_from_url(file_url, path_to_save)"
},
{
"identifier": "poll_run_benchmark",
"path": "owlite/api/device/devices.py",
"snippet": "def poll_run_benchmark(project_id: str, benchmark_key: str) -> None:\n \"\"\"Polls for TensorRT benchmark result.\n\n Args:\n project_id (str): The id of a project.\n benchmark_key (str): A key to identify benchmark job.\n\n Raises:\n ValueError: When unexpected signal is caught by SIGINT handler.\n RuntimeError: When error occurred during TensorRT execution.\n \"\"\"\n\n def sigint_handler(sig: signal.Signals, frame: Any) -> None:\n if sig != signal.SIGINT:\n raise ValueError(f\"Unexpected signals: {sig} (frame={frame})\")\n print(\"\")\n log.info(\n f\"Exit from current experiment. \"\n f\"Continue creating config at \"\n f\"{OWLITE_FRONT_BASE_URL}/project/detail/{project_id}\"\n )\n sys.exit(sig)\n\n original_sigint_handler = signal.signal(signal.SIGINT, sigint_handler) # type: ignore\n\n log.info(\"Polling for benchmark result, you are free to CTRL-C away\")\n\n count = 0\n info = get_benchmark_queue_info(benchmark_key)\n benchmark_status = info[\"benchmark_status\"]\n in_progress = (\n BenchmarkStatus.PRE_FETCHING.value,\n BenchmarkStatus.BENCHMARKING.value,\n )\n while True:\n if count % 5 == 0:\n info = get_benchmark_queue_info(benchmark_key)\n new_status = info[\"benchmark_status\"]\n\n if new_status < 0:\n print(\"\")\n log.error(\n \"Runtime error occurred during TensorRT engine execution or benchmark. Please try again. \"\n f\"If the problem persists, please report us at {OWLITE_REPORT_URL} for further assistance\"\n )\n raise RuntimeError(\"Benchmarking error\")\n\n if benchmark_status != new_status and new_status in in_progress:\n benchmark_status = new_status\n count = 0\n\n elif new_status == BenchmarkStatus.BENCHMARK_DONE.value:\n print(\"\\nBenchmarking done\")\n signal.signal(signal.SIGINT, original_sigint_handler)\n return\n\n if benchmark_status in in_progress:\n if benchmark_status == BenchmarkStatus.PRE_FETCHING.value and info[\"prefetch\"] is not None:\n message = f\"Your position in the queue: {info['prefetch']} {'. ' * (count % 4)}\"\n\n else:\n dots_before = \".\" * count\n owl_emoji = \"\\U0001F989\"\n dots_after = \".\" * (19 - count)\n\n message = f\"[{dots_before}{owl_emoji}{dots_after}]\"\n\n print(f\"\\r{message:<50}\", end=\"\", flush=True)\n\n count = (count + 1) % 20\n time.sleep(2)"
},
{
"identifier": "request_trt_benchmark",
"path": "owlite/api/device/devices.py",
"snippet": "def request_trt_benchmark(benchmark_key: str, bin_path: str) -> None:\n \"\"\"Uploads ONNX weight binary file and request TensorRT benchmark.\n\n Args:\n benchmark_key (str): A key to identify benchmark job.\n bin_path (str): The path of a ONNX weight binary file.\n\n Raises:\n FileNotFoundError: When bin file does not exists at given path.\n ValueError: When device is not set.\n HTTPError: When request was not successful.\n \"\"\"\n\n if not os.path.exists(bin_path):\n log.error(\n f\"Unable to locate the ONNX bin file at the specified path: {bin_path}. \"\n \"Please ensure the file exists and the path is accurate. \"\n \"If the file is missing, recreate the ONNX file and retry\"\n )\n raise FileNotFoundError(\"ONNX bin file not found\")\n\n device_name = OWLITE_DEVICE_NAME\n if device_name is None:\n log.error(\"Connected device not found. Please connect device by 'owlite device connect'\")\n raise ValueError(\"Device not found\")\n\n payload = {\n \"device_name\": device_name,\n \"benchmark_key\": benchmark_key,\n }\n\n resp = DEVICE_API_BASE.post(\"/devices/jobs/export\", json=payload)\n assert isinstance(resp, dict)\n\n file_dest_url = resp[\"bin_file_url\"]\n\n file_upload_resp = upload_file_to_url(bin_path, file_dest_url)\n if not file_upload_resp.ok:\n file_upload_resp.raise_for_status()"
},
{
"identifier": "get_configuration",
"path": "owlite/api/dove/doves.py",
"snippet": "def get_configuration(\n project_id: str,\n baseline_name: str,\n run_name: str,\n) -> str:\n \"\"\"Gets configuration options to apply.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of a run.\n\n Returns:\n str: The compiled configuration string.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n resp = DOVE_API_BASE.post(\"/compile\", json=payload)\n assert isinstance(resp, dict)\n\n return json.dumps(resp)"
},
{
"identifier": "upload_baseline",
"path": "owlite/api/dove/doves.py",
"snippet": "def upload_baseline(\n project_id: str,\n baseline_name: str,\n onnx_path: str,\n model: GraphModule,\n) -> None:\n \"\"\"Uploads baseline's onnx proto and graph module.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n onnx_path (str): The path to baseline onnx proto file.\n model (GraphModule): The traced graph module.\n\n Raises:\n TypeError: When the `model` is not an instance of `torch.fx.GraphModule`.\n HTTPError: When the request was not successful.\n \"\"\"\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n _model_type = f\"torch.nn.parallel.{type(model).__name__}\"\n log.error(\n f\"{_model_type} is not supported by upload_baseline, please use 'attribute' module to unwrap model \"\n f\"{_model_type}. Try owlite.api.dove.doves.upload_baseline(..., model = model.module)\"\n )\n raise TypeError(f\"{_model_type} is not supported by upload_baseline\")\n if not isinstance(model, GraphModule):\n raise TypeError(f\"model of upload_baseline must be GraphModule, but got {type(model)}\")\n\n proto = onnx.load(onnx_path, load_external_data=False)\n input_shape = json.dumps(extract_input_signature_from_onnx_proto(proto))\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"gm\": serialize(model),\n \"onnx\": base64.b64encode(proto.SerializeToString()),\n \"input_shape\": input_shape,\n }\n\n DOVE_API_BASE.post(\"/upload\", payload)"
},
{
"identifier": "check_baseline_existence",
"path": "owlite/api/main/baselines.py",
"snippet": "def check_baseline_existence(project_id: str, baseline_name: str) -> bool:\n \"\"\"Checks if baseline with given name exists at project with given project id.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name to check.\n\n Returns:\n bool: True if baseline exists in given project, False otherwise.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n }\n\n try:\n resp = MAIN_API_BASE.post(\"/projects/baselines/check\", json=payload)\n assert isinstance(resp, bool)\n\n return resp\n\n except requests.exceptions.HTTPError as e:\n if e.response is not None and e.response.status_code == 404:\n return False\n\n raise e"
},
{
"identifier": "create_baseline",
"path": "owlite/api/main/baselines.py",
"snippet": "def create_baseline(project_id: str, baseline_name: str) -> str:\n \"\"\"Creates a baseline experiment with given baseline name at project with given project id.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline to create.\n\n Returns:\n str: The name of created baseline.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/baselines\", json=payload)\n assert isinstance(resp, dict)\n\n return resp[\"baseline_name\"]"
},
{
"identifier": "create_or_load_project",
"path": "owlite/api/main/projects.py",
"snippet": "def create_or_load_project(project_name: str, description: str = \"\") -> str:\n \"\"\"Creates a project with given name and description and return the id of created project, if\n a project with given name already exists and accessible by current user, return the id of\n existing project.\n\n Args:\n project_name (str): The name of a project.\n description (str): The description of a project. Defaults to \"\".\n\n Returns:\n str: The id of a created project.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n log.debug(f\"Attempt creating project with name {project_name}.\")\n\n payload = {\n \"project_name\": project_name,\n \"description\": description,\n }\n\n try:\n resp = MAIN_API_BASE.post(\"/projects\", json=payload)\n\n assert isinstance(resp, dict) and resp[\"name\"] == project_name\n\n log.info(f\"Created new project '{project_name}'\")\n return resp[\"id\"]\n\n except HTTPError as err:\n if err.response is not None and err.response.status_code == 409:\n # project with given name already was created by user before\n\n data = json.loads(err.response.content)\n project_id = data[\"detail\"]\n\n log.debug(f\"Conflict detected, project with name {project_name} already exists, loading existing project.\")\n log.info(f\"Loaded existing project '{project_name}'\")\n return project_id\n\n raise err"
},
{
"identifier": "copy_run",
"path": "owlite/api/main/runs.py",
"snippet": "def copy_run(project_id: str, baseline_name: str, duplicate_from: str, run_name: str) -> str:\n \"\"\"Copies existing experiment and create a new experiment. Compression configuration is also cloned.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n duplicate_from (str): The name of an experiment to clone.\n run_name (str): The name of a new experiment.\n\n Returns:\n str: The name of a created experiment.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": duplicate_from,\n \"new_run_name\": run_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/copy\", json=payload)\n assert isinstance(resp, dict)\n return str(resp[\"name\"])"
},
{
"identifier": "create_run",
"path": "owlite/api/main/runs.py",
"snippet": "def create_run(project_id: str, baseline_name: str, run_name: str) -> None:\n \"\"\"Creates an experiment.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of a new experiment.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n res = MAIN_API_BASE.post(\"/projects/runs\", json=payload)\n assert isinstance(res, dict)"
},
{
"identifier": "get_benchmark_key",
"path": "owlite/api/main/runs.py",
"snippet": "def get_benchmark_key(project_id: str, baseline_name: str, run_name: str) -> str:\n \"\"\"Gets a key to identify a benchmark job.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n\n Returns:\n str: A key to identify a benchmark job.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/keys\", json=payload)\n\n assert isinstance(resp, str)\n return resp"
},
{
"identifier": "get_run_info",
"path": "owlite/api/main/runs.py",
"snippet": "def get_run_info(project_id: str, baseline_name: str, run_name: str) -> Optional[dict]:\n \"\"\"Gets information of an experiment.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n\n Returns:\n Optional[dict]: The information of an experiment if exists, None otherwise.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n }\n\n try:\n res = MAIN_API_BASE.post(\"/projects/runs/info\", json=payload)\n\n except requests.exceptions.HTTPError as e:\n if e.response is not None and e.response.status_code == 404:\n return None\n\n raise e\n\n assert isinstance(res, dict)\n return res"
},
{
"identifier": "update_run_info",
"path": "owlite/api/main/runs.py",
"snippet": "def update_run_info(\n project_id: str,\n baseline_name: str,\n run_name: str,\n logs: str,\n) -> None:\n \"\"\"Updates information for a specific experiment with model metrics.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n logs (str): Logs to be stored in the database.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n \"logs\": logs,\n }\n\n resp = MAIN_API_BASE.post(\"/projects/runs/update\", json=payload)\n assert isinstance(resp, str)"
},
{
"identifier": "upload_run_onnx_proto",
"path": "owlite/api/main/runs.py",
"snippet": "def upload_run_onnx_proto(\n project_id: str,\n baseline_name: str,\n run_name: str,\n onnx_path: str,\n dynamic_axes: Optional[dict[str, dict[int, dict[str, int]]]] = None,\n) -> None:\n \"\"\"Uploads experiment's onnx proto and graph module. Note that parameters are not uploaded.\n\n Args:\n project_id (str): The id of a project.\n baseline_name (str): The name of a baseline.\n run_name (str): The name of an experiment.\n onnx_path (str): The path to experiment onnx proto file.\n dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]], optional): Dynamic axes setting,\n please refer to owlite.onnx.export for detail.\n\n Raises:\n HTTPError: When request was not successful.\n \"\"\"\n\n input_signature = extract_input_signature_from_onnx_proto(onnx_path)\n if dynamic_axes is not None:\n new_input_signature = []\n for name, shape in input_signature:\n axis_setting = dynamic_axes.get(name)\n if axis_setting is not None:\n axis = next(iter(axis_setting))\n setting = axis_setting.get(axis)\n assert setting is not None\n range_setting = [\n setting.get(\"min\"),\n setting.get(\"opt\"),\n setting.get(\"max\"),\n setting.get(\"test\"),\n ]\n shape[axis] = range_setting # type: ignore\n new_input_signature.append((name, shape))\n input_signature = new_input_signature\n\n payload = {\n \"project_id\": project_id,\n \"baseline_name\": baseline_name,\n \"run_name\": run_name,\n \"input_shape\": json.dumps(input_signature),\n }\n\n file_dest_url = MAIN_API_BASE.post(\"/projects/runs/data/upload\", json=payload)\n\n assert file_dest_url is not None and isinstance(file_dest_url, str)\n file_upload_resp = upload_file_to_url(onnx_path, file_dest_url)\n\n if not file_upload_resp.ok:\n file_upload_resp.raise_for_status()"
},
{
"identifier": "symbolic_trace",
"path": "owlite/backend/fx/trace.py",
"snippet": "def symbolic_trace(model: torch.nn.Module, *args: Tensor, **kwargs: dict[str, Any]) -> GraphModule:\n \"\"\"Like `torch.fx.symbolic_trace`, this function traces the input `model` to convert it into a GraphModule.\n In order for the tracing to be successful, the `model` must be able to pass `torch.compile(model, fullgraph=True)`.\n\n Args:\n model (torch.nn.Module): a torch.nn.Module instance.\n\n Raises:\n TypeError: if the `model` is not an instance of `torch.nn.Module`\n RuntimeError: if the tracing fails.\n\n Returns:\n GraphModule: the converted GraphModule.\n \"\"\"\n if not isinstance(model, torch.nn.Module):\n raise TypeError(f\"Expected torch.nn.Module instance but object of type {type(model)} given: {model}\")\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n _model_type = f\"torch.nn.parallel.{type(model).__name__}\"\n log.error(\n f\"{_model_type} is not supported by symbolic trace, please use 'attribute' module to unwrap model \"\n f\"from {_model_type}. Try owlite.fx.symbolic_trace(model.module, ...)\"\n )\n raise TypeError(f\"{_model_type} is not supported by symbolic trace\")\n training_status = model.training\n # move input args and kwargs to model device\n device = get_most_common_device(model)\n dtype = get_most_common_floating_point_type(model)\n log.debug(f\"Tracing with device={device}, dtype={dtype}\")\n\n args = move_tensors_to(args, device, dtype)\n kwargs = move_tensors_to(kwargs, device, dtype)\n\n backend = BackendProvider()\n torch_dynamo.reset()\n optimized_model = torch.compile(model, fullgraph=True, backend=backend)\n output = optimized_model(*args, **kwargs)\n\n graph_module = backend.graph_module\n\n if graph_module is None:\n raise RuntimeError(\"Failed to create torch.fx.GraphModule while running optimized model\")\n\n graph_module = apply_graph_module_transforms(graph_module)\n graph_module = insert_output_adapter(graph_module, output)\n\n original_params = inspect.signature(model.forward).parameters\n graph_module_params = inspect.signature(graph_module.forward).parameters\n\n ignored_params = OrderedDict(\n filter(\n lambda item: (\n item[0] not in graph_module_params\n and item[1].kind\n not in (\n inspect._ParameterKind.VAR_POSITIONAL,\n inspect._ParameterKind.VAR_KEYWORD,\n )\n ),\n original_params.items(),\n )\n )\n if ignored_params:\n log.warning(\n \"The following parameters will be dropped from the graph module's forward method: \"\n f\"{', '.join(ignored_params)}\"\n )\n graph_module.train(training_status)\n graph_module.meta[\"owlite_status\"] = OwLiteStatus.NOT_COMPRESSED\n return graph_module"
},
{
"identifier": "configure_dynamic_dimensions",
"path": "owlite/backend/onnx/dynamize.py",
"snippet": "def configure_dynamic_dimensions(\n input_signature: list[tuple[str, Union[tuple[int, ...], str]]], dynamic_axes: dict[str, dict[int, dict[str, int]]]\n) -> DynamicDimensions:\n \"\"\"Configures dynamic dimension setting to be used by `dynamize` with given ONNX proto and dynamic axes setting.\n\n Args:\n input_signature (list[tuple[str, Union[tuple[int, ...], str]]]): A list of tuples mapping fx graph input names\n to their shape if they are torch.Tensor instances or to their class name otherwise.\n dynamic_axes (Optional[dict[str, dict[int, dict[str, int]]]], optional):\n To specify axes of tensors dynamic(i.e. known only at run-time), set `dynamic_axes` to a dict with schema:\n\n * KEY (str): an input name.\n\n * VALUE (dict[int, dict[str, int]]): a single item dictionary whose key is dynamic dimension of input\n and value is a dynamic range setting dictionary containing min, opt, max, test dimension size settings.\n\n Raises:\n ValueError: When dynamic ONNX proto is given or when invalid `dynamic_axes` is given.\n\n Returns:\n DynamicDimensions: Dynamic dimension setting to be used as an input of `dynamize`.\n \"\"\"\n\n if not check_dynamic_axes_setting(input_signature, dynamic_axes):\n raise ValueError(\"Invalid dynamic axes setting\")\n\n settings = {}\n dynamic_dim_size = None\n onnx_inputs_dict = dict(input_signature)\n for name, setting in dynamic_axes.items():\n dynamic_axis = next(iter(setting))\n\n shape = onnx_inputs_dict[name]\n assert shape is not None\n\n dynamic_dim_size = shape[dynamic_axis]\n\n min_val = setting[dynamic_axis].get(\"min\")\n max_val = setting[dynamic_axis].get(\"max\")\n opt_val = setting[dynamic_axis].get(\"opt\")\n opt_val = setting[dynamic_axis].get(\"test\")\n\n if dynamic_axis < 0:\n dynamic_axis = len(shape) + dynamic_axis\n\n settings[name] = DynamicSetting(shape, dynamic_axis, min_val, max_val, opt_val) # type: ignore\n\n assert dynamic_dim_size is not None and isinstance(dynamic_dim_size, int)\n return DynamicDimensions(dynamic_dim_size, settings)"
},
{
"identifier": "export",
"path": "owlite/backend/onnx/export.py",
"snippet": "def export(\n module: torch.nn.Module,\n args: Union[tuple[Any, ...], torch.Tensor],\n f: str,\n export_params: bool = True,\n verbose: bool = False,\n training: torch._C._onnx.TrainingMode = torch._C._onnx.TrainingMode.EVAL,\n input_names: Optional[Sequence[str]] = None,\n output_names: Optional[Sequence[str]] = None,\n operator_export_type: torch._C._onnx.OperatorExportTypes = torch._C._onnx.OperatorExportTypes.ONNX,\n opset_version: int = 17,\n do_constant_folding: bool = True,\n keep_initializers_as_inputs: Optional[bool] = None,\n custom_opsets: Optional[Mapping[str, int]] = None,\n export_modules_as_functions: Union[bool, Collection[type[torch.nn.Module]]] = False,\n use_fast_export: bool = True,\n apply_transforms: bool = True,\n simplify: bool = True,\n check_n: int = 1,\n skip_fuse_bn: bool = False,\n skipped_optimizers: Optional[list[str]] = None,\n dynamic_dimensions: Optional[DynamicDimensions] = None,\n) -> None:\n r\"\"\"Exports a model into ONNX format.\n\n Args:\n module (torch.nn.Module): The model to be exported.\n args (Union[tuple[Any, ...], torch.Tensor]): Argument of a `module`.\n\n args can be structured either as:\n\n 1. ONLY A TUPLE OF ARGUMENTS::\n\n args = (x, y, z)\n\n The tuple should contain model inputs such that `module(*args)` is a valid\n invocation of the model. Any non-Tensor arguments will be hard-coded into the\n exported model; any Tensor arguments will become inputs of the exported model,\n in the order they occur in the tuple.\n\n 2. A TENSOR::\n\n args = torch.Tensor([1])\n\n This is equivalent to a 1-ary tuple of that Tensor.\n\n 3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS::\n\n args = (\n x,\n {\n \"y\": input_y,\n \"z\": input_z\n }\n )\n\n All but the last element of the tuple will be passed as non-keyword arguments,\n and named arguments will be set from the last element. If a named argument is\n not present in the dictionary, it is assigned the default value, or None if a\n default value is not provided.\n\n .. note::\n If a dictionary is the last element of the args tuple, it will be\n interpreted as containing named arguments. In order to pass a dict as the\n last non-keyword arg, provide an empty dict as the last element of the args\n tuple. For example, instead of::\n\n export(\n module,\n (\n x,\n # WRONG: will be interpreted as named arguments\n {y: z}\n ),\n \"test.onnx.pb\"\n )\n\n Write::\n\n export(\n module,\n (\n x,\n {y: z},\n {}\n ),\n \"test.onnx.pb\"\n )\n f (str): A string containing a file name. A binary protocol buffer will be written to this file.\n export_params (bool, optional): If True, all parameters will\n be exported. Set this to False if you want to export an untrained model.\n In this case, the exported model will first take all of its parameters\n as arguments, with the ordering as specified by `module.state_dict().values()`. Defaults to True.\n verbose (bool, optional): If True, prints a description of the\n model being exported to stdout. In addition, the final ONNX graph will include the\n field `doc_string` from the exported model which mentions the source code locations\n for `module`. If True, ONNX exporter logging will be turned on. 
Defaults to False.\n training (torch._C._onnx.TrainingMode, optional): Defaults to torch._C._onnx.TrainingMode.EVAL.\n * `TrainingMode.EVAL`: export the model in inference mode.\n * `TrainingMode.PRESERVE`: export the model in inference mode if model.training is\n False and in training mode if model.training is True.\n * `TrainingMode.TRAINING`: export the model in training mode. Disables optimizations\n which might interfere with training.\n input_names (Optional[Sequence[str]], optional): Names to assign to the input nodes of the graph, in order.\n Names of `module.forward` arguments will be used when None is given. Defaults to None.\n output_names (Optional[Sequence[str]], optional): Names to assign to the output nodes of the graph, in order.\n Defaults to None.\n operator_export_type (torch._C._onnx.OperatorExportTypes, optional):\n Defaults to `torch._C._onnx.OperatorExportTypes.ONNX`.\n * `OperatorExportTypes.ONNX`: Export all ops as regular ONNX ops (in the default opset domain).\n * `OperatorExportTypes.ONNX_FALLTHROUGH`: Try to convert all ops\n to standard ONNX ops in the default opset domain. If unable to do so\n (e.g. because support has not been added to convert a particular torch op to ONNX),\n fall back to exporting the op into a custom opset domain without conversion. Applies\n to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_\n as well as ATen ops. For the exported model to be usable, the runtime must support\n these non-standard ops.\n * `OperatorExportTypes.ONNX_ATEN`: All ATen ops (in the TorchScript namespace \"aten\")\n are exported as ATen ops (in opset domain \"org.pytorch.aten\").\n `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so\n this instructs the runtime to use PyTorch's implementation of these ops.\n\n .. warning::\n\n Models exported this way are probably runnable only by Caffe2.\n\n This may be useful if the numeric differences in implementations of operators are\n causing large differences in behavior between PyTorch and Caffe2 (which is more\n common on untrained models).\n * `OperatorExportTypes.ONNX_ATEN_FALLBACK`: Try to export each ATen op\n (in the TorchScript namespace \"aten\") as a regular ONNX op. If we are unable to do so\n (e.g. because support has not been added to convert a particular torch op to ONNX),\n fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for\n context.\n For example::\n\n graph(%0 : Float):\n %3 : int = prim::Constant[value=0]()\n # conversion unsupported\n %4 : Float = aten::triu(%0, %3)\n # conversion supported\n %5 : Float = aten::mul(%4, %0)\n return (%5)\n\n Assuming `aten::triu` is not supported in ONNX, this will be exported as::\n\n graph(%0 : Float):\n %1 : Long() = onnx::Constant[value={0}]()\n # not converted\n %2 : Float = aten::ATen[operator=\"triu\"](%0, %1)\n # converted\n %3 : Float = onnx::Mul(%2, %0)\n return (%3)\n\n If PyTorch was built with Caffe2 (i.e. with `BUILD_CAFFE2=1`), then\n Caffe2-specific behavior will be enabled, including special support\n for ops are produced by the modules described in\n `Quantization <https://pytorch.org/docs/stable/quantization.html>`_.\n\n .. warning::\n\n Models exported this way are probably runnable only by Caffe2.\n opset_version (int, optional): The version of the default (ai.onnx) opset\n <https://github.com/onnx/onnx/blob/master/docs/Operators.md> to target. 
Must be >= 7 and <= 18.\n Defaults to 17.\n do_constant_folding (bool, optional): Apply the constant-folding optimization.\n Constant-folding will replace some of the ops that have all constant inputs\n with pre-computed constant nodes. Defaults to True.\n keep_initializers_as_inputs (Optional[bool], optional): If True, all the initializers\n (typically corresponding to parameters) in the exported graph will also be added\n as inputs to the graph. If False, then initializers are not added as inputs to the\n graph, and only the non-parameter inputs are added as inputs. This may allow for\n better optimizations (e.g. constant folding) by backends/runtimes. Defaults to None.\n custom_opsets (Optional[Mapping[str, int]], optional): A dict with schema:\n\n * KEY (str): opset domain name\n * VALUE (int): opset version\n\n If a custom opset is referenced by ``model`` but not mentioned in this dictionary,\n the opset version is set to 1. Only custom opset domain name and version should be\n indicated through this argument. Defaults to None.\n export_modules_as_functions (Union[bool, Collection[type[torch.nn.Module]]], optional): Flag to enable\n exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the\n particular types of modules to export as local functions in ONNX.\n This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because\n ``opset_version`` < 15 implies IR version < 8, which means no local function support.\n Module variables will be exported as function attributes. There are two categories of function\n attributes. Defaults to False.\n use_fast_export (bool, optional): If True, export process will be done in memory. If `module` with total\n parameter size larger than 2GB, this flag will be automatically set to `False`. If False, temporary\n export process will be done using temporary files. Defaults to True.\n apply_transforms (bool, optional): If True, ONNX transforms defined by SqueezeBits.inc will be applied for\n model optimization. If False, ONNX transformations will be skipped. However, turning this flag to `False`\n is experimental and might yield unexpected behavior. Defaults to True.\n simplify (bool, optional): If True, onnx-simplifier will be run. If False, onnx-simplifier will be skipped.\n Defaults to True.\n check_n (int, optional): Only available when `simplify=True`. The number of times to run check for the\n simplified ONNX proto after onnx-simplifier. Defaults to 1.\n skip_fuse_bn (bool, optional): Only available when `simplify=True`. Whether to skip batchnorm-fusion.\n Defaults to False.\n skipped_optimizers (Optional[list[str]], optional): Only available when `simplify=True`. The list of\n onnx-simplifier passes to skip. Defaults to None.\n See https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes for available passes.\n dynamic_dimensions (Optional[DynamicDimensions], optional): Dynamic dimensions setting configured by\n `configure_dynamic_dimensions`. 
Defaults to None.\n\n Raises:\n TypeError: If `f` is not a string.\n ValueError: If the quantizer has invalid condition.\n `torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph.\n `torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it\n uses an operator that is not supported by the exporter.\n `torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export.\n All errors are subclasses of :class:`errors.OnnxExporterError`.\n \"\"\"\n\n if not isinstance(f, str):\n raise TypeError(\"owlite.onnx.export requires the argument `f` to be a string.\")\n\n if isinstance(module, GraphModule):\n if module.meta[\"owlite_status\"] == OwLiteStatus.COMPRESSED:\n log.warning(\n \"This module has not yet been calibrated. \"\n \"The onnx that comes out of this module may have unexpected results in accuracy and latency.\"\n )\n\n clip_narrow_range_weights(module)\n # Batch Norm Fusing\n fuse_bn(module)\n\n # zero point folding\n fold_zp_to_bias(module)\n\n check_fake_quantization_condition(module)\n\n device = get_most_common_device(module)\n dtype = get_most_common_floating_point_type(module)\n args = move_tensors_to(args, device, dtype)\n\n size_in_gigabytes = sum(p.numel() * p.element_size() for p in module.parameters()) / (1 << 30)\n\n if size_in_gigabytes >= 2:\n log.warning(\n f\"Model has total parameter size larger than 2 GB ({size_in_gigabytes:.2f} GB).\"\n '\"use_fast_export\" will be set to False'\n )\n use_fast_export = False\n\n export_function, optimize_function = (_export, _optimize) if use_fast_export else (_export_path, _optimize_path)\n\n if opset_version is None:\n opset_version = 17\n\n if input_names is None and isinstance(module, GraphModule):\n input_names = get_default_input_names(module, args)\n onnx_proto = export_function(\n module,\n args=args,\n export_params=export_params,\n verbose=verbose,\n training=training,\n input_names=input_names,\n output_names=output_names,\n operator_export_type=operator_export_type,\n opset_version=opset_version,\n do_constant_folding=do_constant_folding,\n keep_initializers_as_inputs=keep_initializers_as_inputs,\n custom_opsets=custom_opsets,\n export_modules_as_functions=export_modules_as_functions,\n )\n\n if skipped_optimizers is None:\n skipped_optimizers = [\"fuse_qkv\"]\n\n onnx_proto = optimize_function(\n onnx_proto,\n apply_transforms=apply_transforms,\n simplify=simplify,\n check_n=check_n,\n skip_fuse_bn=skip_fuse_bn,\n skipped_optimizers=skipped_optimizers,\n )\n\n if dynamic_dimensions is not None:\n onnx_proto = dynamize(onnx_proto, dynamic_dimensions)\n\n onnx_proto.producer_name = f\"owlite + {onnx_proto.producer_name}\"\n onnx_proto.doc_string = \"Processed by OwLite\"\n\n model_dir = os.path.dirname(f)\n name, _ = os.path.splitext(os.path.basename(f))\n location = f\"{name}.bin\"\n abs_location = os.path.join(model_dir, location)\n\n log.info(f\"Saving exported ONNX proto at {f} with external data {location}\")\n if model_dir:\n os.makedirs(model_dir, exist_ok=True)\n if abs_location is not None and os.path.isfile(abs_location):\n log.warning(f\"External data file at {abs_location} will be overwritten.\")\n # os.remove is required since onnx.save opens the external data file with mode='ab'\n os.remove(abs_location)\n onnx.save(\n onnx_proto,\n f,\n location=location,\n save_as_external_data=True,\n size_threshold=0,\n )"
},
{
"identifier": "get_input_shape_signature",
"path": "owlite/backend/onnx/export.py",
"snippet": "def get_input_shape_signature(\n module: torch.nn.Module, *args: Any, **kwargs: Any\n) -> list[tuple[str, Union[tuple[int, ...], str]]]:\n \"\"\"Maps the parameter names of a PyTorch module's forward method to the corresponding values' shapes or class name.\n\n This function returns a list of tuples, where each tuple contains a parameter name and its corresponding shape\n (as a tuple of integers) if the value is an instance of `torch.Tensor` or otherwise the name of the class of\n the value.\n\n Args:\n module (torch.nn.Module): The PyTorch module to inspect.\n args (Any): Positional arguments to be passed to the module.\n kwargs (Any): Keyword arguments to be passed to the module.\n\n Returns:\n list[tuple[str, Union[tuple[int, ...], str]]]: A list of tuples mapping parameter names to their shape\n (if they are torch.Tensor instances) or to their class name (for non-torch.Tensor instances).\n\n Note:\n This function assumes that `args` and `kwargs` match the signatures of the module's forward method exactly,\n in order and length. If they don't, the result may not be as expected or exceptions might occur.\n \"\"\"\n signature_map = map_signature(module.forward, *args, **kwargs)\n return [\n (\n name,\n tuple(value.shape) if isinstance(value, torch.Tensor) else value.__class__.__name__,\n )\n for name, value in signature_map\n ]"
},
{
"identifier": "log",
"path": "owlite/logger.py",
"snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):"
},
{
"identifier": "ONNXExportOptions",
"path": "owlite/options/onnx_export_options.py",
"snippet": "class ONNXExportOptions:\n \"\"\"\n Class handling options for ONNX export.\n\n OwLite internally imports the target model to ONNX during conversion or benchmarking.\n Users can set options for ONNX export using this class.\n \"\"\"\n\n opset_version: int = 17"
},
{
"identifier": "GraphQuantizationOptions",
"path": "owlite/options/quantization_options.py",
"snippet": "class GraphQuantizationOptions(OptionsDict):\n \"\"\"\n * Key (str): the name of a FX node\n * Value (NodeQuantizationOptions): node quantization options\n \"\"\"\n\n ValueType = NodeQuantizationOptions"
},
{
"identifier": "quantize",
"path": "owlite/quantize.py",
"snippet": "def quantize(model: GraphModule, options: GraphQuantizationOptions) -> GraphModule:\n \"\"\"Quantizes the model with the specification described in options.\n\n This function inserts quantizers with the quantization options specified in the options,\n substitutes them with the Quantized module, and performs post-processing. The linear module\n that quantizes the bias cannot fuse the batch norm after quantizing, so it proceeds to fuse\n the batch norm. Then, it fuses quantizers with the same quantization option that correspond\n to the same tensor in the original model.\n\n Args:\n model (GraphModule): The symbolic traced model to be quantized.\n options (GraphQuantizationOptions): Options specifying the quantization.\n\n Raises:\n TypeError: If model is not a instance of `GraphModule`.\n\n Returns:\n GraphModule: Quantized model.\n \"\"\"\n\n if not isinstance(model, GraphModule):\n raise TypeError(\"Only GraphModule instance can be quantized with `owlite.quantize`\")\n configure(model, options)\n fuse_linear_bn_with_quantized_bias(model)\n log.debug(\"Fusing the redundant quantizers.\")\n fuse_redundant_quantizers(model)\n enable_quantizers(model, True)\n return model"
}
] | import json
import os
import torch
from dataclasses import asdict, dataclass
from typing import Any, Optional
from torch.fx import GraphModule # type: ignore
from torch.nn.parallel import DataParallel, DistributedDataParallel
from owlite_core.cli.device import OWLITE_DEVICE_NAME
from owlite_core.constants import (
OWLITE_FRONT_BASE_URL,
OWLITE_REPO_PATH,
OWLITE_REPORT_URL,
)
from owlite_core.owlite_settings import OWLITE_SETTINGS
from .api.device.devices import (
download_trt_engine,
poll_run_benchmark,
request_trt_benchmark,
)
from .api.dove.doves import get_configuration, upload_baseline
from .api.main.baselines import check_baseline_existence, create_baseline
from .api.main.projects import create_or_load_project
from .api.main.runs import (
copy_run,
create_run,
get_benchmark_key,
get_run_info,
update_run_info,
upload_run_onnx_proto,
)
from .backend.fx.trace import symbolic_trace
from .backend.onnx.dynamize import configure_dynamic_dimensions
from .backend.onnx.export import export, get_input_shape_signature
from .logger import log
from .options import GraphQuantizationOptions, ONNXExportOptions
from .quantize import quantize | 10,689 | # type: ignore
"""OwLite Optimization Module
This module facilitates optimization and benchmarking of models using OwLite services."""
@dataclass
class OwLite:
"""Class handling OwLite project, baseline, and experiment configurations.
The OwLite class manages project, baseline, and experiment configurations within the OwLite system.
It allows users to create or load projects, set baselines, create or duplicate experiments, convert models,
and benchmark models against the specified configurations.
"""
project_id: str
project_name: str
baseline_name: str
experiment_name: str
| # type: ignore
"""OwLite Optimization Module
This module facilitates optimization and benchmarking of models using OwLite services."""
@dataclass
class OwLite:
"""Class handling OwLite project, baseline, and experiment configurations.
The OwLite class manages project, baseline, and experiment configurations within the OwLite system.
It allows users to create or load projects, set baselines, create or duplicate experiments, convert models,
and benchmark models against the specified configurations.
"""
project_id: str
project_name: str
baseline_name: str
experiment_name: str | onnx_export_options: ONNXExportOptions | 24 | 2023-12-08 06:41:50+00:00 | 12k |
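
The row above only shows the truncated opening of the `OwLite` dataclass, while its context list quotes the helper functions that class presumably orchestrates. As a rough illustration of how those quoted helpers could fit together, here is a minimal sketch; it is not taken from the dataset row — the call order, the run/experiment naming, and the toy `torch.nn.Linear` model are my assumptions — and actually running it would require the `owlite` package plus valid OwLite credentials and a device connected via `owlite device connect` (`OWLITE_DEVICE_NAME`).

```python
import torch

from owlite.api.device.devices import poll_run_benchmark, request_trt_benchmark
from owlite.api.dove.doves import upload_baseline
from owlite.api.main.baselines import check_baseline_existence, create_baseline
from owlite.api.main.projects import create_or_load_project
from owlite.api.main.runs import get_benchmark_key
from owlite.backend.fx.trace import symbolic_trace
from owlite.backend.onnx.export import export

# Toy stand-in for a real network; symbolic_trace requires the model to survive
# torch.compile(..., fullgraph=True), which a plain Linear does.
model = torch.nn.Linear(16, 4).eval()
example_input = torch.randn(1, 16)

# 1. Create (or load) a project and make sure a baseline exists for it.
project_id = create_or_load_project("demo-project", "sketch of the owlite flow")
if not check_baseline_existence(project_id, "baseline"):
    create_baseline(project_id, "baseline")

# 2. Trace the model and export it to ONNX; export() also writes the external
#    weight data next to the proto as "baseline.bin".
graph_module = symbolic_trace(model, example_input)
export(graph_module, (example_input,), "baseline.onnx")

# 3. Register the baseline proto and graph module with the service.
upload_baseline(project_id, "baseline", "baseline.onnx", graph_module)

# 4. Request a TensorRT benchmark for the exported weights and poll until it
#    finishes (the run name "baseline" here is a guess).
benchmark_key = get_benchmark_key(project_id, "baseline", "baseline")
request_trt_benchmark(benchmark_key, "baseline.bin")
poll_run_benchmark(project_id, benchmark_key)
```

A compression experiment would presumably branch off from here: `get_configuration` fetches the options JSON, `quantize` applies a `GraphQuantizationOptions` to the traced module, and `create_run` / `upload_run_onnx_proto` / `update_run_info` register and benchmark the experiment, per the snippets quoted in this record's context.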
bolna-ai/bolna | bolna/providers.py | [
{
"identifier": "PollySynthesizer",
"path": "bolna/synthesizer/polly_synthesizer.py",
"snippet": "class PollySynthesizer(BaseSynthesizer):\n def __init__(self, voice, language, audio_format=\"pcm\", sampling_rate=\"8000\", stream=False, engine=\"neural\",\n buffer_size=400):\n super().__init__(stream, buffer_size)\n self.engine = engine\n self.format = audio_format.lower()\n self.voice = voice\n self.language = language\n self.sample_rate = str(sampling_rate)\n\n # @TODO: initialize client here\n self.client = None\n\n @staticmethod\n async def create_client(service: str, session: AioSession, exit_stack: AsyncExitStack):\n # creates AWS session from system environment credentials & config\n return await exit_stack.enter_async_context(session.create_client(service))\n async def __generate_http(self, text):\n session = AioSession()\n async with AsyncExitStack() as exit_stack:\n polly = await self.create_client(\"polly\", session, exit_stack)\n logger.info(f\"Generating TTS response for text: {text}, SampleRate {self.sample_rate} format {self.format}\")\n try:\n response = await polly.synthesize_speech(\n Engine=self.engine,\n Text=text,\n OutputFormat=self.format,\n VoiceId=self.voice,\n LanguageCode=self.language,\n SampleRate=self.sample_rate\n )\n except (BotoCoreError, ClientError) as error:\n logger.error(error)\n else:\n return await response[\"AudioStream\"].read()\n \n async def open_connection(self):\n pass\n \n async def synthesize(self, text):\n #This is used for one off synthesis mainly for use cases like voice lab and IVR\n audio = await self.__generate_http(text)\n return audio\n\n async def generate(self):\n while True:\n logger.info(\"Generating TTS response\")\n message = await self.internal_queue.get()\n logger.info(f\"Generating TTS response for message: {message}\")\n meta_info, text = message.get(\"meta_info\"), message.get(\"data\")\n message = await self.__generate_http(text)\n if \"end_of_llm_stream\" in meta_info and meta_info[\"end_of_llm_stream\"]:\n meta_info[\"end_of_synthesizer_stream\"] = True\n yield create_ws_data_packet(message, meta_info)\n\n async def push(self, message):\n logger.info(\"Pushed message to internal queue\")\n self.internal_queue.put_nowait(message)"
},
{
"identifier": "XTTSSynthesizer",
"path": "bolna/synthesizer/xtts_synthesizer.py",
"snippet": "class XTTSSynthesizer(BaseSynthesizer):\n def __init__(self, audio_format = \"wav\", stream = False, sampling_rate=\"24000\", buffer_size=400, language = \"en\", voice = \"rohan\"):\n super().__init__(stream, buffer_size)\n self.buffer = [] # Initialize buffer to make sure we're sending chunks of words instead of token wise\n self.buffered = False\n self.ws_url = os.getenv('TTS_WS')\n self.api_url = os.getenv('TTS_API_URL')\n self.format = audio_format\n self.stream = stream\n self.language = language\n self.voice = voice\n self.sampling_rate = sampling_rate\n self.websocket_connection = None \n\n async def _send_payload(self, payload):\n url = self.api_url\n\n async with aiohttp.ClientSession() as session:\n if payload is not None:\n async with session.post(url, json=payload) as response:\n if response.status == 200:\n data = await response.read() \n return data\n else:\n logger.error(f\"Error: {response.status} - {await response.text()}\")\n else:\n logger.info(\"Payload was null\")\n\n async def __generate_http(self, text):\n payload = None\n logger.info(f\"text {text}\")\n payload = {\n \"text\": text,\n \"model\": \"xtts\",\n \"language\": self.language,\n \"voice\": self.voice\n }\n logger.info(f\"Sending {payload}\")\n response = await self._send_payload(payload)\n return response\n\n async def _generate(self, text):\n try:\n yield await self._http_tts(text)\n except Exception as e:\n logger.error(f\"Error in xtts generate {e}\")\n\n async def sender(self, text, end_of_llm_stream):\n logger.info(f\"Sending to the serve {text} which is end_of_llm_stream {end_of_llm_stream}\")\n input_message = {\n \"text\": text,\n \"model\": \"xtts\",\n \"language\": self.language,\n \"voice\": self.voice,\n \"end_of_stream\": end_of_llm_stream\n }\n\n await self.websocket_connection.send(json.dumps(input_message))\n logger.info(f\"Sent to the server {input_message}\")\n\n async def receiver(self):\n while True:\n try:\n if self.websocket_connection is not None:\n chunk = await self.websocket_connection.recv()\n if not self.buffered and len(self.buffer) < 3:\n self.buffer.append(chunk)\n continue\n if len(self.buffer) == 3:\n chunk = b''.join(self.buffer)\n self.buffer = []\n self.buffered = True\n\n if int(self.sampling_rate) != 24000:\n logger.info(f\"Changing the sampling rate to {int(self.sampling_rate)}\")\n chunk = audioop.ratecv(chunk, 2, 1, 24000, int(self.sampling_rate), None)[0]\n \n yield chunk\n\n\n except ConnectionClosed:\n logger.error(\"Connection closed\")\n break\n except Exception as e:\n logger.error(f\"Error in receiving and processing audio bytes {e}\")\n\n async def synthesize(self, text):\n #This is used for one off synthesis mainly for use cases like voice lab and IVR\n audio = await self.__generate_http(text)\n return audio\n\n async def open_connection(self):\n if self.websocket_connection is None:\n self.websocket_connection = await websockets.connect(self.ws_url)\n logger.info(\"Connected to the server\")\n async def generate(self):\n try:\n if self.stream:\n async for message in self.receiver():\n logger.info(f\"Received message friom server\")\n yield create_ws_data_packet(message, self.meta_info)\n if message == b'\\x00':\n logger.info(\"received null byte and hence end of stream\")\n self.meta_info[\"end_of_synthesizer_stream\"] = True\n yield create_ws_data_packet(message, self.meta_info)\n else:\n while True:\n message = await self.internal_queue.get()\n logger.info(f\"Generating TTS response for message: {message}\")\n meta_info, text = 
message.get(\"meta_info\"), message.get(\"data\")\n audio = await self.__generate_http(text)\n if \"end_of_llm_stream\" in meta_info and meta_info[\"end_of_llm_stream\"]:\n meta_info[\"end_of_synthesizer_stream\"] = True\n yield create_ws_data_packet(audio, meta_info)\n except Exception as e:\n logger.error(f\"Error in xtts generate {e}\")\n\n \n async def push(self, message):\n logger.info(f\"Pushed message to internal queue {message}\")\n if self.stream:\n meta_info, text = message.get(\"meta_info\"), message.get(\"data\")\n end_of_llm_stream = \"end_of_llm_stream\" in meta_info and meta_info[\"end_of_llm_stream\"]\n self.meta_info = meta_info\n self.sender_task = asyncio.create_task(self.sender(text, end_of_llm_stream))\n else:\n self.internal_queue.put_nowait(message)"
},
{
"identifier": "ElevenlabsSynthesizer",
"path": "bolna/synthesizer/elevenlabs_synthesizer.py",
"snippet": "class ElevenlabsSynthesizer(BaseSynthesizer):\n def __init__(self, voice, voice_id, model=\"eleven_multilingual_v1\", audio_format = \"pcm\", sampling_rate = \"16000\", stream=False, buffer_size=400):\n super().__init__(stream)\n self.api_key = os.environ[\"ELEVENLABS_API_KEY\"]\n self.voice = voice_id\n self.model = model\n self.stream = stream #Issue with elevenlabs streaming that we need to always send the text quickly\n self.websocket_connection = None\n self.connection_open = False\n self.sampling_rate = sampling_rate\n self.audio_format = audio_format\n self.ws_url = f\"wss://api.elevenlabs.io/v1/text-to-speech/{self.voice}/stream-input?model_id=eleven_multilingual_v1&optimize_streaming_latency=2&output_format={self.get_format(self.audio_format, self.sampling_rate)}\"\n self.api_url = f\"https://api.elevenlabs.io/v1/text-to-speech/{self.voice}?optimize_streaming_latency=3&output_format=\"\n \n def get_format(self, format, sampling_rate): \n #Eleven labs only allow mp3_44100_64, mp3_44100_96, mp3_44100_128, mp3_44100_192, pcm_16000, pcm_22050, pcm_24000, ulaw_8000\n if format == \"pcm\":\n return f\"pcm_16000\"\n if format == \"mp3\":\n return f\"mp3_44100_128\"\n if format == \"ulaw\":\n return \"ulaw_8000\"\n else:\n return \"mp3_44100_128\"\n \n async def sender(self, text, end_of_llm_stream=False): # sends text to websocket\n if not self.connection_open:\n logger.info(\"Connecting to elevenlabs websocket...\")\n bos_message = {\n \"text\": \" \",\n \"voice_settings\": {\n \"stability\": 0.5,\n \"similarity_boost\": 0.5\n },\n \"xi_api_key\": self.api_key,\n }\n await self.websocket_connection.send(json.dumps(bos_message))\n self.connection_open = True\n\n if text != \"\":\n logger.info(f\"Sending message {text}\")\n\n input_message = {\n \"text\": f\"{text} \",\n \"try_trigger_generation\": True,\n \"flush\": True\n }\n await self.websocket_connection.send(json.dumps(input_message))\n\n if end_of_llm_stream:\n logger.info(\"End of LLM stream\")\n eos_message = {\n \"text\": \"\"\n }\n await self.websocket_connection.send(json.dumps(eos_message))\n # self.connection_open = False\n\n async def receiver(self):\n while True:\n if not self.connection_open:\n logger.info(\"Since eleven labs always closes the connection after every leg, simply open it...\")\n await self.open_connection()\n try:\n response = await self.websocket_connection.recv()\n data = json.loads(response)\n\n logger.info(\"response for isFinal: {}\".format(data.get('isFinal', False)))\n if \"audio\" in data and data[\"audio\"]:\n chunk = base64.b64decode(data[\"audio\"])\n # @TODO make it better - for example sample rate changing for mp3 and other formats \n if self.audio_format == \"pcm\" and self.sampling_rate != 16000:\n chunk = audioop.ratecv(chunk, 2, 1,16000 , int(self.sampling_rate), None)[0]\n elif self.audio_format == \"mp3\" and self.sampling_rate != 44100:\n chunk = audioop.ratecv(chunk, 2, 1, 44100 , int(self.sampling_rate), None)[0]\n yield chunk\n \n if \"isFinal\" in data and data[\"isFinal\"]:\n self.connection_open = False\n yield b'\\x00'\n\n else:\n logger.info(\"No audio data in the response\")\n except websockets.exceptions.ConnectionClosed:\n break\n\n async def __send_payload(self, payload, format = None):\n headers = {\n 'xi-api-key': self.api_key\n }\n\n url = f\"{self.api_url}{self.get_format(self.audio_format, self.sampling_rate)}\" if format is None else f\"{self.api_url}{format}\"\n\n async with aiohttp.ClientSession() as session:\n if payload is not None:\n async with 
session.post(url, headers=headers, json=payload) as response:\n if response.status == 200:\n data = await response.read()\n return data\n else:\n logger.error(f\"Error: {response.status} - {await response.text()}\")\n else:\n logger.info(\"Payload was null\")\n\n async def synthesize(self, text):\n audio = await self.__generate_http(text, format = \"mp3_44100_128\")\n return audio\n\n async def __generate_http(self, text, format = None):\n payload = None\n logger.info(f\"text {text}\")\n payload = {\n \"text\": text,\n \"model_id\": self.model,\n \"voice_settings\": {\n \"stability\": 0.5,\n \"similarity_boost\": 0.5,\n \"optimize_streaming_latency\": 3\n }\n }\n response = await self.__send_payload(payload, format = format)\n return response\n\n async def generate(self):\n try:\n if self.stream:\n async for message in self.receiver():\n logger.info(f\"Received message friom server\")\n yield create_ws_data_packet(message, self.meta_info)\n if message == b'\\x00':\n logger.info(\"received null byte and hence end of stream\")\n self.meta_info[\"end_of_synthesizer_stream\"] = True\n yield create_ws_data_packet(message, self.meta_info)\n else:\n while True:\n message = await self.internal_queue.get()\n logger.info(f\"Generating TTS response for message: {message}\")\n meta_info, text = message.get(\"meta_info\"), message.get(\"data\")\n audio = await self.__generate_http(text)\n if \"end_of_llm_stream\" in meta_info and meta_info[\"end_of_llm_stream\"]:\n meta_info[\"end_of_synthesizer_stream\"] = True\n yield create_ws_data_packet(audio, meta_info)\n except Exception as e:\n logger.error(f\"Error in eleven labs generate {e}\")\n\n async def open_connection(self):\n if self.websocket_connection is None or self.connection_open is False:\n self.websocket_connection = await websockets.connect(self.ws_url)\n logger.info(\"Connected to the server\")\n\n async def push(self, message):\n logger.info(f\"Pushed message to internal queue {message}\")\n if self.stream:\n meta_info, text = message.get(\"meta_info\"), message.get(\"data\")\n end_of_llm_stream = \"end_of_llm_stream\" in meta_info and meta_info[\"end_of_llm_stream\"]\n self.meta_info = meta_info\n self.sender_task = asyncio.create_task(self.sender(text, end_of_llm_stream))\n else:\n self.internal_queue.put_nowait(message)"
},
{
"identifier": "DeepgramTranscriber",
"path": "bolna/transcriber/deepgram_transcriber.py",
"snippet": "class DeepgramTranscriber(BaseTranscriber):\n def __init__(self, provider, input_queue=None, model='deepgram', stream=True, language=\"en\", endpointing=\"400\",\n sampling_rate=\"16000\", encoding=\"linear16\"):\n super().__init__(input_queue)\n self.endpointing = endpointing\n self.language = language\n self.stream = stream\n self.provider = provider\n self.heartbeat_task = None\n self.sender_task = None\n self.model = 'deepgram'\n self.sampling_rate = sampling_rate\n self.encoding = encoding\n if not self.stream:\n self.session = aiohttp.ClientSession()\n self.api_url = f\"https://api.deepgram.com/v1/listen?model=nova-2&filler_words=true&language={self.language}\"\n\n def get_deepgram_ws_url(self):\n websocket_url = (f\"wss://api.deepgram.com/v1/listen?encoding=linear16&sample_rate=16000&channels=1\"\n f\"&filler_words=true&endpointing={self.endpointing}\")\n\n if self.provider == 'twilio':\n websocket_url = (f\"wss://api.deepgram.com/v1/listen?model=nova-2&encoding=mulaw&sample_rate=8000&channels\"\n f\"=1&filler_words=true&endpointing={self.endpointing}\")\n\n if self.provider == \"playground\":\n websocket_url = (f\"wss://api.deepgram.com/v1/listen?model=nova-2&encoding=opus&sample_rate=8000&channels\"\n f\"=1&filler_words=true&endpointing={self.endpointing}\")\n if \"en\" not in self.language:\n websocket_url += '&language={}'.format(self.language)\n return websocket_url\n\n async def send_heartbeat(self, ws):\n try:\n while True:\n data = {'type': 'KeepAlive'}\n await ws.send(json.dumps(data))\n await asyncio.sleep(5) # Send a heartbeat message every 5 seconds\n except Exception as e:\n logger.error('Error while sending: ' + str(e))\n raise Exception(\"Something went wrong while sending heartbeats to {}\".format(self.model))\n\n async def toggle_connection(self):\n self.connection_on = False\n if self.heartbeat_task is not None:\n await self.heartbeat_task.cancel()\n await self.sender_task.cancel()\n\n async def _get_http_transcription(self, audio_data):\n if self.session is None or self.session.closed:\n self.session = aiohttp.ClientSession()\n\n headers = {\n 'Authorization': 'Token {}'.format(os.getenv('DEEPGRAM_AUTH_TOKEN')),\n 'Content-Type': 'audio/webm' # Currently we are assuming this is via browser\n }\n start_time = time.time()\n async with self.session as session:\n async with session.post(self.api_url, data=audio_data, headers=headers) as response:\n response_data = await response.json()\n logger.info(f\"response_data {response_data} total time {time.time() - start_time}\")\n transcript = response_data[\"results\"][\"channels\"][0][\"alternatives\"][0][\"transcript\"]\n logger.info(f\"transcript {transcript} total time {time.time() - start_time}\")\n self.meta_info['transcriber_duration'] = response_data[\"metadata\"][\"duration\"]\n return create_ws_data_packet(transcript, self.meta_info)\n\n async def _handle_data_packet(self, ws_data_packet, ws):\n if 'eos' in ws_data_packet['meta_info'] and ws_data_packet['meta_info']['eos'] is True:\n await self._close(ws, data={\"type\": \"CloseStream\"})\n return True # Indicates end of processing\n\n return False\n\n async def sender(self, ws=None):\n try:\n while True:\n ws_data_packet = await self.input_queue.get()\n end_of_stream = await self._handle_data_packet(ws_data_packet, ws)\n if end_of_stream:\n break\n\n self.meta_info = ws_data_packet.get('meta_info')\n transcription = await self._get_http_transcription(ws_data_packet.get('data'))\n yield transcription\n except Exception as e:\n logger.error('Error while 
sending: ' + str(e))\n raise Exception(\"Something went wrong\")\n\n async def sender_stream(self, ws=None):\n try:\n while True:\n ws_data_packet = await self.input_queue.get()\n end_of_stream = await self._handle_data_packet(ws_data_packet, ws)\n if end_of_stream:\n break\n\n self.meta_info = ws_data_packet.get('meta_info')\n await asyncio.gather(ws.send(ws_data_packet.get('data')))\n\n except Exception as e:\n logger.error('Error while sending: ' + str(e))\n raise Exception(\"Something went wrong\")\n\n async def receiver(self, ws):\n curr_message = \"\"\n async for msg in ws:\n try:\n msg = json.loads(msg)\n if msg['type'] == \"Metadata\":\n logger.info(f\"Got a summary object {msg}\")\n self.meta_info[\"transcriber_duration\"] = msg[\"duration\"]\n yield create_ws_data_packet(\"transcriber_connection_closed\", self.meta_info)\n return\n\n transcript = msg['channel']['alternatives'][0]['transcript']\n\n self.update_meta_info()\n\n if transcript and len(transcript.strip()) != 0:\n if await self.signal_transcription_begin(msg):\n yield create_ws_data_packet(\"TRANSCRIBER_BEGIN\", self.meta_info)\n\n curr_message += \" \" + transcript\n\n if (msg[\"speech_final\"] and self.callee_speaking) or not self.stream:\n yield create_ws_data_packet(curr_message, self.meta_info)\n logger.info('User: {}'.format(curr_message))\n curr_message = \"\"\n yield create_ws_data_packet(\"TRANSCRIBER_END\", self.meta_info)\n self.callee_speaking = False\n self.last_vocal_frame_time = None\n self.previous_request_id = self.current_request_id\n self.current_request_id = None\n except Exception as e:\n logger.error(f\"Error while getting transcriptions {e}\")\n yield create_ws_data_packet(\"TRANSCRIBER_END\", self.meta_info)\n\n def deepgram_connect(self):\n websocket_url = self.get_deepgram_ws_url()\n extra_headers = {\n 'Authorization': 'Token {}'.format(os.getenv('DEEPGRAM_AUTH_TOKEN'))\n }\n deepgram_ws = websockets.connect(websocket_url, extra_headers=extra_headers)\n\n return deepgram_ws\n\n async def transcribe(self):\n async with self.deepgram_connect() as deepgram_ws:\n if self.stream:\n self.sender_task = asyncio.create_task(self.sender_stream(deepgram_ws))\n self.heartbeat_task = asyncio.create_task(self.send_heartbeat(deepgram_ws))\n async for message in self.receiver(deepgram_ws):\n if self.connection_on:\n yield message\n else:\n logger.info(\"closing the deepgram connection\")\n await self._close(deepgram_ws, data={\"type\": \"CloseStream\"})\n else:\n async for message in self.sender():\n yield message"
},
{
"identifier": "DefaultInputHandler",
"path": "bolna/input_handlers/default.py",
"snippet": "class DefaultInputHandler:\n def __init__(self, queues=None, websocket=None, input_types=None, mark_set=None, connected_through_dashboard=False):\n self.queues = queues\n self.websocket = websocket\n self.input_types = input_types\n self.websocket_listen_task = None\n self.running = True\n self.connected_through_dashboard = connected_through_dashboard\n\n async def stop_handler(self):\n self.running = False\n try:\n await self.websocket.close()\n except Exception as e:\n logger.error(f\"Error closing WebSocket: {e}\")\n\n async def _listen(self):\n try:\n while self.running:\n request = await self.websocket.receive_json()\n\n if request['type'] not in self.input_types.keys() and not self.connected_through_dashboard:\n logger.info(f\"straight away returning\")\n return {\"message\": \"invalid input type\"}\n\n if request['type'] == 'audio':\n data = base64.b64decode(request['data'])\n ws_data_packet = create_ws_data_packet(\n data=data,\n meta_info={\n 'io': 'default',\n 'type': request['type'],\n 'sequence': self.input_types['audio']\n })\n\n self.queues['transcriber'].put_nowait(ws_data_packet)\n\n elif request[\"type\"] == \"text\":\n logger.info(f\"Received text: {request['data']}\")\n data = request['data']\n logger.info(f\"Sequences {self.input_types}\")\n ws_data_packet = create_ws_data_packet(\n data=data,\n meta_info={\n 'io': 'default',\n 'type': request['type'],\n 'sequence': self.input_types['audio']\n\n })\n\n if self.connected_through_dashboard:\n ws_data_packet[\"meta_info\"][\"bypass_synth\"] = True\n\n self.queues['llm'].put_nowait(ws_data_packet)\n logger.info(f\"Put into llm queue\")\n else:\n return {\"message\": \"Other modalities not implemented yet\"}\n except Exception as e:\n # Send EOS message to transcriber to shut the connection\n ws_data_packet = create_ws_data_packet(\n data=None,\n meta_info={\n 'io': 'default',\n 'eos': True\n })\n self.queues['transcriber'].put_nowait(ws_data_packet)\n logger.info(f\"Error while handling websocket message: {e}\")\n return\n\n async def handle(self):\n self.websocket_listen_task = asyncio.create_task(self._listen())"
},
{
"identifier": "TwilioInputHandler",
"path": "bolna/input_handlers/twilio.py",
"snippet": "class TwilioInputHandler(DefaultInputHandler):\n def __init__(self, queues, websocket=None, input_types=None, mark_set=None, connected_through_dashboard=False):\n super().__init__(queues, websocket, input_types, connected_through_dashboard)\n self.stream_sid = None\n self.call_sid = None\n self.buffer = []\n self.message_count = 0\n self.mark_set = mark_set\n self.last_media_received = 0\n\n async def call_start(self, packet):\n start = packet['start']\n self.call_sid = start['callSid']\n self.stream_sid = start['streamSid']\n\n async def process_mark_message(self, packet):\n if packet[\"mark\"][\"name\"] in self.mark_set:\n self.mark_set.remove(packet[\"mark\"][\"name\"])\n\n async def stop_handler(self):\n logger.info(\"stopping handler\")\n self.running = False\n logger.info(\"sleeping for 5 seconds so that whatever needs to pass is passed\")\n await asyncio.sleep(5)\n try:\n await self.websocket.close()\n logger.info(\"WebSocket connection closed\")\n except Exception as e:\n logger.error(f\"Error closing WebSocket: {e}\")\n\n async def ingest_audio(self, audio_data, meta_info):\n ws_data_packet = create_ws_data_packet(data=audio_data, meta_info=meta_info)\n self.queues['transcriber'].put_nowait(ws_data_packet)\n\n async def _listen(self):\n while True:\n try:\n message = await self.websocket.receive_text()\n\n packet = json.loads(message)\n if packet['event'] == 'start':\n await self.call_start(packet)\n elif packet['event'] == 'media':\n media_data = packet['media']\n media_audio = base64.b64decode(media_data['payload'])\n media_ts = int(media_data[\"timestamp\"])\n\n if packet['media']['track'] == 'inbound':\n meta_info = {\n 'io': 'twilio',\n 'call_sid': self.call_sid,\n 'stream_sid': self.stream_sid,\n 'sequence': self.input_types['audio']\n }\n\n if self.last_media_received + 20 < media_ts:\n bytes_to_fill = 8 * (media_ts - (self.last_media_received + 20))\n logger.info(f\"Filling {bytes_to_fill} bytes of silence\")\n await self.ingest_audio(b\"\\xff\" * bytes_to_fill, meta_info)\n\n self.last_media_received = media_ts\n await self.ingest_audio(media_audio, meta_info)\n else:\n logger.info(\"Getting media elements but not inbound media\")\n\n elif packet['event'] == 'mark':\n await self.process_mark_message(packet)\n\n elif packet['event'] == 'stop':\n logger.info('call stopping')\n ws_data_packet = create_ws_data_packet(data=None, meta_info={'io': 'default', 'eos': True})\n self.queues['transcriber'].put_nowait(ws_data_packet)\n break\n\n except Exception as e:\n ws_data_packet = create_ws_data_packet(\n data=None,\n meta_info={\n 'io': 'default',\n 'eos': True\n })\n self.queues['transcriber'].put_nowait(ws_data_packet)\n logger.error('Exception in twilio_receiver reading events: {}'.format(e))\n break\n\n async def handle(self):\n self.websocket_listen_task = asyncio.create_task(self._listen())"
},
{
"identifier": "DefaultOutputHandler",
"path": "bolna/output_handlers/default.py",
"snippet": "class DefaultOutputHandler:\n def __init__(self, websocket=None, mark_set=None, log_dir_name=None):\n self.websocket = websocket\n self.is_interruption_task_on = False\n\n # @TODO Figure out the best way to handle this\n async def handle_interruption(self):\n message_clear = {\n \"event\": \"clear\"\n }\n\n async def handle(self, packet):\n try:\n logger.info(f\"Packet received:\")\n data = None\n if packet[\"meta_info\"]['type'] in ('audio', 'text'):\n if packet[\"meta_info\"]['type'] == 'audio':\n logger.info(f\"Sending audio\")\n data = base64.b64encode(packet['data']).decode(\"utf-8\")\n elif packet[\"meta_info\"]['type'] == 'text':\n logger.info(f\"Sending text response {packet['data']}\")\n data = packet['data']\n\n response = {\"data\": data, \"type\": packet[\"meta_info\"]['type']}\n await self.websocket.send_json(response)\n\n else:\n logger.error(\"Other modalities are not implemented yet\")\n except Exception as e:\n logger.error(f\"something went wrong in speaking {e}\")"
},
{
"identifier": "TwilioOutputHandler",
"path": "bolna/output_handlers/twilio.py",
"snippet": "class TwilioOutputHandler(DefaultOutputHandler):\n def __init__(self, websocket=None, mark_set=None, log_dir_name=None):\n super().__init__(websocket, log_dir_name)\n self.mark_set = mark_set\n\n self.stream_sid = None\n self.current_request_id = None\n self.rejected_request_ids = set()\n\n async def handle_interruption(self):\n logger.info(\"interrupting because user spoke in between\")\n if len(self.mark_set) > 0:\n message_clear = {\n \"event\": \"clear\",\n \"streamSid\": self.stream_sid,\n }\n await self.websocket.send_text(json.dumps(message_clear))\n self.mark_set = set()\n\n async def send_sms(self, message_text, call_number):\n message = twilio_client.messages.create(\n to='{}'.format(call_number),\n from_='{}'.format(os.getenv('TWILIO_PHONE_NUMBER')),\n body=message_text)\n logger.info(f'Sent whatsapp message: {message_text}')\n return message.sid\n\n async def send_whatsapp(self, message_text, call_number):\n message = twilio_client.messages.create(\n to='whatsapp:{}'.format(call_number),\n from_='whatsapp:{}'.format(os.getenv('TWILIO_PHONE_NUMBER')),\n body=message_text)\n logger.info(f'Sent whatsapp message: {message_text}')\n return message.sid\n\n async def handle(self, ws_data_packet):\n try:\n audio_chunk = ws_data_packet.get('data')\n meta_info = ws_data_packet.get('meta_info')\n self.stream_sid = meta_info.get('stream_sid', None)\n\n try:\n if self.current_request_id == meta_info['request_id']:\n if len(audio_chunk) == 1:\n audio_chunk += b'\\x00'\n\n if audio_chunk and self.stream_sid and len(audio_chunk) != 1:\n # Convert from 16-bit PCM to ULAW, resampling from 16000 Hz to 8000 Hz\n audio_resampled = audioop.ratecv(audio_chunk, 2, 1, 16000, 8000, None)\n audio = audioop.lin2ulaw(audio_resampled[0], 2)\n base64_audio = base64.b64encode(audio).decode(\"utf-8\")\n message = {\n 'event': 'media',\n 'streamSid': self.stream_sid,\n 'media': {\n 'payload': base64_audio\n }\n }\n\n await self.websocket.send_text(json.dumps(message))\n\n mark_id = str(uuid.uuid4())\n self.mark_set.add(mark_id)\n mark_message = {\n \"event\": \"mark\",\n \"streamSid\": self.stream_sid,\n \"mark\": {\n \"name\": mark_id\n }\n }\n await self.websocket.send_text(json.dumps(mark_message))\n except Exception as e:\n logger.error(f'something went wrong while sending message to twilio {e}')\n\n except Exception as e:\n logger.error(f'something went wrong while handling twilio {e}')"
},
{
"identifier": "OpenAiLLM",
"path": "bolna/llms/openai_llm.py",
"snippet": "class OpenAiLLM(BaseLLM):\n def __init__(self, max_tokens=100, buffer_size=40, streaming_model=\"gpt-3.5-turbo-16k\",\n classification_model=\"gpt-3.5-turbo-1106\", temperature= 0.1):\n super().__init__(max_tokens, buffer_size)\n self.model = streaming_model\n self.started_streaming = False\n self.async_client = AsyncOpenAI()\n self.max_tokens = max_tokens\n self.classification_model = classification_model\n self.temperature = temperature\n\n async def generate_stream(self, messages, classification_task=False, synthesize=True, request_json=False):\n response_format = self.get_response_format(request_json)\n\n answer, buffer = \"\", \"\"\n model = self.classification_model if classification_task is True else self.model\n logger.info(f\"request to open ai {messages}\")\n #message_hash = get_md5_hash(messages[-1].content) \n async for chunk in await self.async_client.chat.completions.create(model=model, temperature=self.temperature,\n messages=messages, stream=True,\n max_tokens=self.max_tokens,\n response_format=response_format):\n if text_chunk := chunk.choices[0].delta.content:\n answer += text_chunk\n buffer += text_chunk\n\n if len(buffer) >= self.buffer_size and synthesize:\n text = ' '.join(buffer.split(\" \")[:-1])\n\n if synthesize:\n if not self.started_streaming:\n self.started_streaming = True\n yield text,False\n buffer = buffer.split(\" \")[-1]\n\n if synthesize: #This is used only in streaming sense \n yield buffer, True\n else:\n yield answer, True\n self.started_streaming = False\n\n async def generate(self, messages, classification_task=False, stream=False, synthesize=True, request_json=False):\n response_format = self.get_response_format(request_json)\n logger.info(f\"request to open ai {messages}\")\n model = self.classification_model if classification_task is True else self.model\n\n completion = await self.async_client.chat.completions.create(model=model, temperature=0.0, messages=messages,\n stream=False, response_format=response_format)\n res = completion.choices[0].message.content\n return res\n\n def get_response_format(self, is_json_format: bool):\n if is_json_format and self.classification_model in ('gpt-4-1106-preview', 'gpt-3.5-turbo-1106'):\n return {\"type\": \"json_object\"}\n else:\n return {\"type\": \"text\"}"
},
{
"identifier": "LiteLLM",
"path": "bolna/llms/litellm.py",
"snippet": "class LiteLLM(BaseLLM):\n def __init__(self, streaming_model, api_key=None, api_base=None, max_tokens=100, buffer_size=40,\n classification_model=None, temperature=0.0):\n super().__init__(max_tokens, buffer_size)\n self.model = streaming_model\n self.api_key = api_key or os.getenv('LLM_MODEL_API_KEY')\n self.api_base = api_base or os.getenv('LLM_MODEL_API_BASE')\n self.started_streaming = False\n self.max_tokens = max_tokens\n self.classification_model = classification_model\n self.temperature = temperature\n\n\n async def generate_stream(self, messages, synthesize=True):\n answer, buffer = \"\", \"\"\n logger.info(f\"request to model: {self.model}: {messages}\")\n start_time = time.time()\n async for chunk in await litellm.acompletion(model=self.model, messages=messages, api_key=self.api_key,\n api_base=self.api_base, temperature=0.2,\n max_tokens=self.max_tokens, stream=True):\n logger.info(f\"Got chunk {chunk}\")\n if (text_chunk := chunk['choices'][0]['delta'].content) and not chunk['choices'][0].finish_reason:\n answer += text_chunk\n buffer += text_chunk\n\n if len(buffer) >= self.buffer_size and synthesize:\n text = ' '.join(buffer.split(\" \")[:-1])\n\n if synthesize:\n if not self.started_streaming:\n self.started_streaming = True\n yield text\n buffer = buffer.split(\" \")[-1]\n\n if synthesize:\n if buffer != \"\":\n yield buffer\n else:\n yield answer\n self.started_streaming = False\n logger.info(f\"Time to generate response {time.time() - start_time}\")\n async def generate(self, messages, classification_task=False, stream=False, synthesize=True, request_json=False):\n model = self.classification_model if classification_task is True else self.model\n logger.info(f'Request to litellm {messages}')\n\n completion_args = {\n \"model\": model,\n \"messages\": messages,\n \"api_key\": self.api_key,\n \"api_base\": self.api_base,\n \"temperature\": self.temperature,\n \"stream\": stream\n }\n\n if request_json is True:\n completion_args['response_format'] = {\n \"type\": \"json_object\",\n \"schema\": json_to_pydantic_schema('{\"classification_label\": \"classification label goes here\"}')\n }\n completion = await litellm.acompletion(**completion_args)\n text = completion.choices[0].message.content\n return text"
}
] | from .synthesizer import PollySynthesizer, XTTSSynthesizer, ElevenlabsSynthesizer
from .transcriber import DeepgramTranscriber
from .input_handlers import DefaultInputHandler, TwilioInputHandler
from .output_handlers import DefaultOutputHandler, TwilioOutputHandler
from .llms import OpenAiLLM, LiteLLM | 9,162 |
SUPPORTED_SYNTHESIZER_MODELS = {
'polly': PollySynthesizer,
'xtts': XTTSSynthesizer,
"elevenlabs": ElevenlabsSynthesizer
}
SUPPORTED_TRANSCRIBER_MODELS = {
'deepgram': DeepgramTranscriber
}
SUPPORTED_LLM_MODELS = {
|
SUPPORTED_SYNTHESIZER_MODELS = {
'polly': PollySynthesizer,
'xtts': XTTSSynthesizer,
"elevenlabs": ElevenlabsSynthesizer
}
SUPPORTED_TRANSCRIBER_MODELS = {
'deepgram': DeepgramTranscriber
}
SUPPORTED_LLM_MODELS = { | 'openai': OpenAiLLM, | 8 | 2023-12-13 09:07:35+00:00 | 12k |
qitan/devops-backend-lite | apps/ucenter/views.py | [
{
"identifier": "FEISHU_SYNC_USER_JOB_CACHE_KEY",
"path": "common/variables.py",
"snippet": "FEISHU_SYNC_USER_JOB_CACHE_KEY = 'celery_job:feishu_user_sync'"
},
{
"identifier": "Menu",
"path": "dbapp/models.py",
"snippet": ""
},
{
"identifier": "CustomModelViewSet",
"path": "common/extends/viewsets.py",
"snippet": "class CustomModelViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset that provides default `create()`, `retrieve()`, `update()`,\n `partial_update()`, `destroy()` and `list()` actions.\n \"\"\"\n\n def get_permission_from_role(self, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def extend_filter(self, queryset):\n return queryset\n\n def get_queryset(self):\n \"\"\"\n Get the list of items for this view.\n This must be an iterable, and may be a queryset.\n Defaults to using `self.queryset`.\n\n This method should always be used rather than accessing `self.queryset`\n directly, as `self.queryset` gets evaluated only once, and those results\n are cached for all subsequent requests.\n\n You may want to override this if you need to provide different\n querysets depending on the incoming request.\n\n (Eg. return a list of items that is specific to the user)\n \"\"\"\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n return queryset.distinct()\n\n @action(methods=['GET'], url_path='count', detail=False)\n def count(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n return Response({'code': 20000, 'data': queryset.count()})\n\n def create(self, request, *args, **kwargs):\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n print('exception ', str(e))\n serializer = self.get_serializer(data=request.data)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': serializer.errors})\n try:\n self.perform_create(serializer)\n except BaseException as e:\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n log_audit(request, action_type=self.serializer_class.Meta.model.__name__, action='创建', content='',\n data=serializer.data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def list(self, request, pk=None, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n page_size = request.query_params.get('page_size')\n pagination.PageNumberPagination.page_size = page_size\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self.get_serializer(queryset, many=True)\n data = {'data': {'total': queryset.count(), 'items': serializer.data},\n 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n partial = kwargs.pop('partial', False)\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n logger.warning(f'不包含name字段: {str(e)}')\n serializer = self.get_serializer(\n instance, data=request.data, partial=partial)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': str(serializer.errors)})\n try:\n self.perform_update(serializer)\n except BaseException as e:\n logger.exception(f'更新失败,原因:{e}')\n return Response({'code': 50000, 'status': 'failed', 
'message': str(e)})\n\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n\n log_audit(request, self.serializer_class.Meta.model.__name__, '更新', content=f\"更新对象:{instance}\",\n data=serializer.data, old_data=self.serializer_class(instance).data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n data = {'data': serializer.data, 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"\n TODO: 删除操作物理删除 or 逻辑删除(增加删除标记字段)\n \"\"\"\n instance = self.get_object()\n try:\n self.perform_destroy(instance)\n except ProtectedError:\n # 存在关联数据,不可删除\n return Response({'code': 50000, 'status': 'failed', 'message': '存在关联数据,禁止删除!'})\n except BaseException as e:\n logger.exception(f'删除数据发生错误 {e}, {e.__class__}')\n return Response({'code': 50000, 'status': 'failed', 'message': f'删除异常: {str(e)}'})\n log_audit(request, self.serializer_class.Meta.model.__name__,\n '删除', content=f\"删除对象:{instance}\")\n\n return Response({'code': 20000, 'status': 'success', 'msg': ''})"
},
{
"identifier": "CustomModelParentViewSet",
"path": "common/extends/viewsets.py",
"snippet": "class CustomModelParentViewSet(CustomModelViewSet):\n\n def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if self.action == 'list':\n if not self.request.query_params.get('search'):\n queryset = queryset.filter(parent__isnull=True)\n if isinstance(queryset, QuerySet):\n queryset = queryset.all()\n return queryset.distinct()"
},
{
"identifier": "RbacPermission",
"path": "common/extends/permissions.py",
"snippet": "class RbacPermission(BasePermission):\n \"\"\"\n 自定义权限\n \"\"\"\n\n @classmethod\n def check_is_admin(cls, request):\n return request.user.is_authenticated and request.user.roles.filter(name='管理员').count() > 0\n\n @classmethod\n def get_permission_from_role(cls, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def _has_permission(self, request, view):\n \"\"\"\n :return:\n \"\"\"\n _method = request._request.method.lower()\n platform = get_redis_data('platform')\n url_whitelist = platform['whitelist'] if platform else []\n url_whitelist.extend(\n [{'url': '/api/login/feishu/'}, {'url': '/api/login/gitlab/'}])\n path_info = request.path_info\n for item in url_whitelist:\n url = item['url']\n if url in path_info:\n logger.debug(f'请求地址 {path_info} 命中白名单 {url}, 放行')\n return True\n\n from_workflow = 'from_workflow' in request.GET\n if _method == 'get' and from_workflow:\n return True\n\n is_superuser = request.user.is_superuser\n if is_superuser:\n return True\n\n is_admin = RbacPermission.check_is_admin(request)\n perms = self.get_permission_from_role(request)\n if not is_admin and not perms:\n logger.debug(f'用户 {request.user} 不是管理员 且 权限列表为空, 直接拒绝')\n return False\n\n perms_map = view.perms_map\n\n action = view.action\n _custom_method = f'{_method}_{action}'\n for i in perms_map:\n for method, alias in i.items():\n if is_admin and (method == '*' and alias[0] == 'admin'):\n return True\n if method == '*' and alias[0] in perms:\n return True\n if _custom_method and alias[0] in perms and (_custom_method == method or method == f'*_{action}'):\n return True\n if _method == method and alias[0] in perms:\n return True\n return False\n\n def has_permission(self, request, view):\n res = self._has_permission(request, view)\n # 记录权限异常的操作\n if not res:\n AuditLog.objects.create(\n user=request.user, type='', action='拒绝操作',\n action_ip=user_ip(request),\n content=f\"请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data='',\n old_data=''\n )\n return res"
},
{
"identifier": "CustomInvalidToken",
"path": "common/extends/JwtAuth.py",
"snippet": "class CustomInvalidToken(InvalidToken):\n status_code = status.HTTP_401_UNAUTHORIZED\n default_detail = 'Token不合法或者已经过期.'\n default_code = 40100"
},
{
"identifier": "TokenObtainPairSerializer",
"path": "common/extends/JwtAuth.py",
"snippet": "class TokenObtainPairSerializer(BaseTokenObtainPairSerializer):\n\n default_error_messages = {\n \"no_active_account\": \"用户名或者密码错误!\"\n }\n\n @classmethod\n def get_token(cls, user):\n token = RefreshToken.for_user(user)\n return token"
},
{
"identifier": "TokenRefreshSerializer",
"path": "common/extends/JwtAuth.py",
"snippet": "class TokenRefreshSerializer(BaseTokenRefreshSerializer):\n\n def validate(self, attrs):\n refresh = RefreshToken(attrs['refresh'])\n data = {'access': str(refresh.access_token)}\n\n if api_settings.ROTATE_REFRESH_TOKENS:\n if api_settings.BLACKLIST_AFTER_ROTATION:\n try:\n # Attempt to blacklist the given refresh token\n refresh.blacklist()\n except AttributeError:\n # If blacklist app not installed, `blacklist` method will\n # not be present\n pass\n\n refresh.set_jti()\n refresh.set_exp()\n\n data['refresh'] = str(refresh)\n\n return data"
},
{
"identifier": "log_audit",
"path": "common/extends/handler.py",
"snippet": "def log_audit(request, action_type, action, content=None, data=None, old_data=None, user=None):\n if user is None:\n user = request.user.first_name or request.user.username\n\n AuditLog.objects.create(user=user, type=action_type, action=action,\n action_ip=user_ip(request),\n content=f\"{mask_sensitive_data(content)}\\n请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data=mask_sensitive_data(data),\n old_data=mask_sensitive_data(old_data))"
},
{
"identifier": "AuditLogFilter",
"path": "common/extends/filters.py",
"snippet": "class AuditLogFilter(FilterSet):\n exclude = ExcludeFilter(field_name='type', lookup_expr='in', exclude=True)\n type = CharFilter(field_name='type')\n\n class Meta:\n models = AuditLog\n fields = ['type', 'exclude']"
},
{
"identifier": "CustomSearchFilter",
"path": "common/extends/filters.py",
"snippet": "class CustomSearchFilter(SearchFilter):\n\n def get_search_fields(self, view, request):\n \"\"\"\n Search fields are obtained from the view, but the request is always\n passed to this method. Sub-classes can override this method to\n dynamically change the search fields based on request content.\n \"\"\"\n if hasattr(view, 'get_search_fields'):\n return view.get_search_fields()\n return getattr(view, 'search_fields', None)\n\n def get_search_terms(self, request):\n \"\"\"\n Search terms are set by a ?search=... query parameter,\n and may be comma and/or whitespace delimited.\n \"\"\"\n params = request.query_params.get(self.search_param, '')\n params = params.replace('\\x00', '') # strip null characters\n values = params.strip('+').split('+')\n if len(values) > 1:\n return values, 1\n params = params.replace(',', ' ')\n params = params.replace('|', ' ')\n return params.split(), 0\n\n def filter_queryset(self, request, queryset, view):\n search_fields = self.get_search_fields(view, request)\n search_param = self.get_search_terms(request)\n search_terms = search_param[0]\n search_condition = search_param[1]\n if not search_fields or not search_terms:\n return queryset\n\n orm_lookups = [\n self.construct_search(str(search_field))\n for search_field in search_fields\n ]\n\n base = queryset\n conditions = []\n for search_term in search_terms:\n queries = [\n models.Q(**{orm_lookup: search_term.strip()})\n for orm_lookup in orm_lookups\n ]\n conditions.append(reduce(operator.or_, queries))\n if search_condition == 1:\n queryset = queryset.filter(reduce(operator.and_, conditions))\n else:\n queryset = queryset.filter(reduce(operator.or_, conditions))\n\n if self.must_call_distinct(queryset, search_fields):\n # Filtering against a many-to-many field requires us to\n # call queryset.distinct() in order to avoid duplicate items\n # in the resulting queryset.\n # We try to avoid this if possible, for performance reasons.\n queryset = distinct(queryset, base)\n return queryset"
},
{
"identifier": "GlueJenkins",
"path": "common/utils/JenkinsAPI.py",
"snippet": "class GlueJenkins(Jenkins):\n\n def __init__(self, url=None, username=None, password=None):\n self.__url = url\n self.__username = username\n self.__password = password\n super(GlueJenkins, self).__init__(\n self.__url, self.__username, self.__password)\n\n def _get_encoded_params(self, params):\n for k, v in params.items():\n if k in [\"name\", \"msg\", \"short_name\", \"from_short_name\",\n \"to_short_name\", \"folder_url\", \"from_folder_url\", \"to_folder_url\"]:\n params[k] = quote(v.encode('utf8'))\n return params\n\n def _build_url(self, format_spec, variables=None):\n\n if variables:\n url_path = format_spec % self._get_encoded_params(variables)\n else:\n url_path = format_spec\n return str(urljoin(self.server, url_path))\n\n def assert_credential_exists(self, name, folder_name=None, domain_name='_',\n exception_message='credential[%s] does not exist.'):\n '''Raise an exception if credential does not exist in domain of folder\n\n :param name: Name of credential, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :param exception_message: Message to use for the exception.\n Formatted with ``name``, ``domain_name``,\n and ``folder_name``\n :throws: :class:`JenkinsException` whenever the credentail\n does not exist in domain of folder\n '''\n if not self.credential_exists(name, folder_name, domain_name):\n raise JenkinsException(exception_message\n % name)\n\n def get_credential_global_config(self, name, domain_name='_'):\n '''Get configuration of credential in domain of folder.\n :param name: Name of credentail, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Credential configuration (XML format)\n '''\n return self.jenkins_open(requests.Request(\n 'GET', self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n ))\n\n def get_credential_info(self, name, folder_name=None, domain_name='_'):\n '''Get credential information dictionary in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: folder_name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Dictionary of credential info, ``dict``\n '''\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(CREDENTIAL_INFO_GLOBAL, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('credential[%s] does not exist.' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('credential[%s] does not exist.' % name)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for credential[%s].' 
% name\n )\n\n def credential_exists(self, name, folder_name=None, domain_name='_'):\n '''Check whether a credentail exists in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: ``True`` if credentail exists, ``False`` otherwise\n '''\n try:\n return self.get_credential_info(name)['id'] == name\n except JenkinsException:\n return False\n\n def create_credential_global(self, name=None, user=None, password=None, secret=None, comment=None, domain_name='_'):\n '''Create credentail in domain of folder\n\n :param name: username\n :param password: password\n :param comment: comment, ``str``\n :param config_xml: New XML configuration, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n '''\n st = shortuuid.ShortUUID()\n st.set_alphabet(\n f\"0123456789{''.join([chr(i) for i in range(ord('a'), ord('z') + 1)])}\")\n if name is None:\n name = '-'.join(['api', st.random(length=8),\n st.random(length=4), st.random(length=12)])\n config_xml = '''<com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <username>%s</username>\n <password>%s</password>\n</com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>''' % (name, comment, user, password)\n if user is None:\n config_xml = '''<org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <secret>%s</secret>\n</org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>''' % (name, comment, secret)\n if self.credential_exists(name):\n raise JenkinsException('credential[%s] already exists.' 
% name)\n\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_CREDENTIAL_GLOBAL, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n self.assert_credential_exists(\n name, exception_message='create credential[%s] failed.')\n return {'status': 0, 'data': name}\n\n def reconfig_credential_global(self, name, user=None, password=None, secret=None, comment=None, domain_name='_'):\n \"\"\"\n Reconfig credential with new config in domain of folder\n :param name: name, ``str``\n :param user:\n :param password:\n :param secret:\n :param comment:\n :param domain_name: Domain name, default is '_', ``str``\n :return:\n \"\"\"\n reconfig_url = self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n config_xml = self.get_credential_global_config(name)\n xml_dict = xmltodict.parse(config_xml)\n if user is None:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['secret'] = secret\n if comment:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['description'] = comment\n else:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['username'] = user\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['password'] = password\n if comment:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl'][\n 'description'] = comment\n config_xml = xmltodict.unparse(xml_dict, pretty=True)\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def create_job(self, name, config_xml):\n '''Create a new Jenkins job\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: config file text, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n if self.job_exists(name):\n raise JenkinsException('job[%s] already exists' % (name))\n\n try:\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_JOB, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n except NotFoundException:\n raise JenkinsException('Cannot create job[%s] because folder '\n 'for the job does not exist' % (name))\n self.assert_job_exists(name, 'create[%s] failed')\n\n def reconfig_job(self, name, config_xml):\n '''Change configuration of existing Jenkins job.\n\n To create a new job, see :meth:`Jenkins.create_job`.\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: New XML configuration, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n reconfig_url = self._build_url(CONFIG_JOB, locals())\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def get_stage_describe(self, name, number, node_number):\n \"\"\" 获取 单个stage 详情 \"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_DES, locals())\n ))\n\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_logs(self, name, number, node_number):\n \"\"\" 获取 stage 执行日志\"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n 
response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_LOG, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_info(self, name, number, depth=0):\n\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_INFO, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_flow_detail(self, job_name, build_number):\n stage_data = self.get_stage_info(name=job_name, number=build_number)\n stages = stage_data.get('stages')\n for i in stages:\n logs = ''\n try:\n # 获取stage返回信息\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(i['_links']['self']['href']), locals())\n ))\n if response:\n res = json.loads(response)\n for j in res['stageFlowNodes']:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(j['_links']['log']['href']), locals())\n ))\n res = json.loads(response)\n try:\n # 移除href html信息,保留链接文字\n import re\n pat = re.compile('<a href[^>]*>')\n logs = logs + '\\n' + \\\n pat.sub('', res['text'].replace('</a>', ''))\n except:\n pass\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (job_name, build_number)\n )\n\n stage_data[\"stages\"][stages.index(i)]['logs'] = logs\n return stage_data\n\n def get_queue_item(self, number, depth=0):\n '''Get information about a queued item (to-be-created job).\n\n The returned dict will have a \"why\" key if the queued item is still\n waiting for an executor.\n\n The returned dict will have an \"executable\" key if the queued item is\n running on an executor, or has completed running. Use this to\n determine the job number / URL.\n\n :param name: queue number, ``int``\n :returns: dictionary of queued information, ``dict``\n '''\n url = self._build_url(Q_ITEM, locals())\n try:\n response = self.jenkins_open(requests.Request('GET', url))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('queue number[%d] does not exist'\n % number)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('queue number[%d] does not exist' % number)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for queue number[%d]' % number\n )\n\n def build_job(self, name, parameters=None, token=None):\n '''Trigger build job.\n\n This method returns a queue item number that you can pass to\n :meth:`Jenkins.get_queue_item`. 
Note that this queue number is only\n valid for about five minutes after the job completes, so you should\n get/poll the queue information as soon as possible to determine the\n job's URL.\n\n :param name: name of job\n :param parameters: parameters for job, or ``None``, ``dict``\n :param token: Jenkins API token\n :returns: ``int`` queue item\n '''\n response = self.jenkins_request(requests.Request(\n 'POST', self.build_job_url(name, parameters, token)))\n\n if 'Location' not in response.headers:\n raise EmptyResponseException(\n \"Header 'Location' not found in \"\n \"response from server[%s]\" % self.server)\n\n location = response.headers['Location']\n if location.endswith('/'):\n location = location[:-1]\n parts = location.split('/')\n number = int(parts[-1])\n return number\n\n def get_job_config(self, name):\n '''Get configuration of existing Jenkins job.\n\n :param name: Name of Jenkins job, ``str``\n :returns: job configuration (XML format)\n '''\n folder_url, short_name = self._get_job_folder(name)\n request = requests.Request(\n 'GET', self._build_url(CONFIG_JOB, locals()))\n return self.jenkins_open(request)\n\n def get_job_info(self, name, depth=0, fetch_all_builds=False):\n '''Get job information dictionary.\n\n :param name: Job name, ``str``\n :param depth: JSON depth, ``int``\n :param fetch_all_builds: If true, all builds will be retrieved\n from Jenkins. Otherwise, Jenkins will\n only return the most recent 100\n builds. This comes at the expense of\n an additional API call which may\n return significant amounts of\n data. ``bool``\n :returns: dictionary of job information\n '''\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(JOB_INFO, locals())\n ))\n if response:\n if fetch_all_builds:\n return self._add_missing_builds(json.loads(response))\n else:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] does not exist' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] does not exist' % name)\n except ValueError:\n raise JenkinsException(\n \"Could not parse JSON info for job[%s]\" % name)"
},
{
"identifier": "user_ip",
"path": "common/get_ip.py",
"snippet": "def user_ip(request):\n \"\"\"\n 获取用户真实IP\n :param request:\n :return:\n \"\"\"\n if 'X-Real-IP' in request.META:\n return request.META['X-Real-IP']\n if 'HTTP_X_FORWARDED_FOR' in request.META:\n return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0]\n if 'REMOTE_ADDR' in request.META:\n return request.META['REMOTE_ADDR'].split(',')[0]"
},
{
"identifier": "ThirdPartyUser",
"path": "common/ext_fun.py",
"snippet": "class ThirdPartyUser(object):\n\n def get_user(self):\n user = UserProfile.objects.get_or_create(username='thirdparty')[0]\n self.set_permission(user, self.get_role())\n return user\n\n def get_role(self):\n return Role.objects.get_or_create(name='thirdparty')[0]\n\n def get_perm(self):\n return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0]\n\n def set_permission(self, user, role):\n role.permissions.set([self.get_perm().id])\n user.roles.set([role.id])"
},
{
"identifier": "set_redis_data",
"path": "common/ext_fun.py",
"snippet": "def set_redis_data(name, config):\n cache.set(f\"system:{name}\", config, None)"
},
{
"identifier": "get_redis_data",
"path": "common/ext_fun.py",
"snippet": "def get_redis_data(name):\n ret = cache.get(f\"system:{name}\")\n if not ret:\n try:\n if name == 'cicd-harbor':\n qs = SystemConfig.objects.filter(type=name)[0]\n else:\n qs = SystemConfig.objects.get(name=name)\n except BaseException as e:\n return None\n ret = json.loads(qs.config)\n set_redis_data(name, ret)\n\n return ret"
},
{
"identifier": "timeline_generate",
"path": "common/ext_fun.py",
"snippet": "def timeline_generate(time_range, format_type='dashboard'):\n \"\"\"\n 根据起始时间生成时间线\n\n : params format_type: 默认为dashboard, 用于概览报表粗略显示, 其它用于监控类的展示则使用更细粒度的格式\n \"\"\"\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES\n TIME_FORMAT = DASHBOARD_TIME_FORMAT\n if format_type == 'cmdb':\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES_T\n TIME_FORMAT = DASHBOARD_TIME_FORMAT_T\n start_time = time_range['start_time']\n end_time = time_range['end_time']\n time_line = rrule(\n freq=TIME_FREQNAMES[time_range['name']], dtstart=start_time, until=end_time)\n return [i.strftime(TIME_FORMAT[time_range['name']]) for i in time_line]"
},
{
"identifier": "time_period",
"path": "common/ext_fun.py",
"snippet": "def time_period(time_range='6-months', type_range='static', time_zone='Asia/Shanghai', name=None):\n \"\"\"\n 根据时间范围生成起止时间\n \"\"\"\n start_time = None\n end_time = timezone.now().astimezone(pytz.timezone(time_zone))\n if type_range == 'dynamic' and name is None:\n start_time = datetime.strptime(time_range[0], '%Y-%m-%d %H:%M:%S')\n end_time = datetime.strptime(time_range[1], '%Y-%m-%d %H:%M:%S')\n if start_time > end_time:\n start_time, end_time = end_time, start_time\n if (end_time - start_time).days >= 60:\n name = 'months'\n elif (end_time - start_time).days >= 2:\n name = 'days'\n elif (end_time - start_time).days >= 1 or (end_time - start_time).seconds > 60 * 60:\n name = 'hours'\n else:\n name = 'minutes'\n return {'name': name, 'start_time': start_time, 'end_time': end_time}\n\n if type_range == 'static':\n _time = time_range.split('-')\n if _time[-1] == 'week':\n start_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second,\n microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] == 'lastweek':\n start_time = end_time - relativedelta(days=end_time.weekday() + 7, hours=end_time.hour,\n minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n end_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second, microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] in ['today', 'yesterday']:\n start_time = end_time - relativedelta(hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n if _time[-1] == 'yesterday':\n end_time = start_time\n start_time = end_time - relativedelta(days=1)\n return {'name': 'hours', 'start_time': start_time, 'end_time': end_time}\n name = _time[1]\n if name is None:\n if _time[1] in ['years', 'months']:\n name = 'months'\n if _time[1] == 'months' and int(_time[0]) < 2:\n name = 'days'\n if _time[1] == 'days' and int(_time[0]) < 2:\n name = 'hours'\n start_time = end_time + relativedelta(**{_time[1]: -int(_time[0])})\n return {'name': name, 'start_time': start_time, 'end_time': end_time}"
},
{
"identifier": "node_filter",
"path": "common/ext_fun.py",
"snippet": "def node_filter(node_id, data):\n \"\"\"\n 查找节点\n\n :params: node_id int 节点ID\n :params: data list 节点数组\n \"\"\"\n for i in data:\n if i['id'] == node_id:\n print('get node', i)\n return i\n else:\n if i.get('children', None):\n node = node_filter(node_id, i['children'])\n if isinstance(node, (dict,)):\n return node"
},
{
"identifier": "test_notify",
"path": "qtasks/tasks.py",
"snippet": "def test_notify(receiver, notify_type='mail', robot_name=None, robot_webhook=None, robot_key=None,\n robot_type='dingtalk'):\n ret = None\n if notify_type == 'mail':\n mail_send = OmsMail()\n ret = mail_send.test_notify(receiver)\n if notify_type == 'robot':\n robot_notify = ROBOT_CATEGORIES[robot_type](robot_webhook, robot_key)\n ret = robot_notify.test_notify(receiver, robot_name)\n\n return ret"
}
] | import hashlib
import django_filters
import datetime
import time
import shortuuid
import json
import logging
from django.core.cache import cache
from rest_framework import viewsets, status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework import pagination
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from rest_framework_simplejwt.exceptions import TokenError, InvalidToken
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework_simplejwt.tokens import RefreshToken, Token, OutstandingToken
from rest_framework.filters import SearchFilter, OrderingFilter
from django_q.tasks import async_task, result
from django.contrib.auth.models import update_last_login
from django.db.models import Q
from django.contrib.auth import logout
from common.variables import FEISHU_SYNC_USER_JOB_CACHE_KEY
from dbapp.models import Menu, Permission, Role, Organization, UserProfile, AuditLog, SystemConfig, DataDict
from ucenter.serializers import MenuSerializers, MenuListSerializers, PermissionListSerializers, PermissionSerializers, \
RoleListSerializers, \
RoleSerializers, OrganizationSerializers, \
UserProfileListSerializers, UserProfileSerializers, UserProfileDetailSerializers, AuditLogSerializers, \
AuditLogActivitySerializers, SystemConfigSerializers, \
SystemConfigListSerializers, DataDictSerializers
from common.extends.viewsets import CustomModelViewSet, CustomModelParentViewSet
from common.extends.permissions import RbacPermission
from common.extends.JwtAuth import CustomInvalidToken, TokenObtainPairSerializer, TokenRefreshSerializer
from common.extends.handler import log_audit
from common.extends.filters import AuditLogFilter, CustomSearchFilter
from common.utils.JenkinsAPI import GlueJenkins
from common.get_ip import user_ip
from common.ext_fun import ThirdPartyUser, set_redis_data, get_redis_data, timeline_generate, time_period, \
node_filter
from qtasks.tasks import test_notify
from django.conf import settings
from django.contrib.auth import login, REDIRECT_FIELD_NAME
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.decorators.cache import never_cache | 10,672 | {'get': ('data_list', '查看数据字典')},
{'post': ('data_create', '创建数据字典')},
{'put': ('data_edit', '编辑数据字典')},
{'patch': ('data_edit', '编辑数据字典')},
{'delete': ('data_delete', '删除数据字典')}
"""
perms_map = (
{'*': ('admin', '管理员')},
{'*': ('data_all', '数据字典管理')},
{'get': ('data_list', '查看数据字典')},
{'post': ('data_create', '创建数据字典')},
{'put': ('data_edit', '编辑数据字典')},
{'patch': ('data_edit', '编辑数据字典')},
{'delete': ('data_delete', '删除数据字典')}
)
queryset = DataDict.objects.all()
serializer_class = DataDictSerializers
filter_backends = (
django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter)
filter_fields = ('key', 'value')
search_fields = ('key', 'value')
def perform_update(self, serializer):
serializer.save()
cache.delete(f"datadict:{serializer.data['key']}:0")
cache.delete(f"datadict:{serializer.data['key']}:1")
@action(methods=['GET'], url_path='user', detail=False)
def get_user(self, request):
"""
获取用户列表
### 传递参数
force: 0|1
force为1时强制刷新
"""
_force = request.query_params.get('force', None)
position = request.query_params.get('position', None)
_key = str(
f'project:users:{self.request.user.id}-{self.request.query_params}')
try:
data = cache.get(_key)
except BaseException as e:
cache.delete(_key)
data = None
if not data or _force:
if position:
users = UserProfile.objects.exclude(
username='thirdparty').filter(position=position)
else:
users = UserProfile.objects.exclude(username='thirdparty')
data = [{'id': i.id, 'first_name': i.first_name, 'username': i.username, 'name': i.name, 'title': i.title,
'position': i.position} for i in users]
cache.set(_key, data, timeout=60 * 60 * 24)
return Response({'code': 20000, 'data': data})
@action(methods=['GET'], url_path='extra', detail=False)
def get_by_key(self, request):
"""
通过指定key名获取
参数: key
"""
key_name = request.query_params.get('key', None)
instance = self.queryset.get(key=key_name)
serializer = self.get_serializer(instance)
data = {'data': serializer.data, 'code': 20000, 'status': 'success'}
return Response(data)
class AuditLogViewSet(CustomModelViewSet):
"""
审计日志视图
### 审计日志权限
{'get': ('audit_list', '查看审计日志')}
"""
perms_map = (
{'*': ('admin', '管理员')},
{'get': ('audit_list', '查看审计日志')}
)
queryset = AuditLog.objects.all()
serializer_class = AuditLogSerializers
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,
CustomSearchFilter, OrderingFilter)
filter_class = AuditLogFilter
filter_fields = ('user', 'type', 'action', 'action_ip', 'operator')
search_fields = ('user', 'type', 'action', 'action_ip', 'content')
def create(self, request, *args, **kwargs):
pass
def update(self, request, *args, **kwargs):
pass
def destroy(self, request, *args, **kwargs):
pass
class MenuViewSet(CustomModelParentViewSet):
"""
菜单视图
### 菜单权限
{'*': ('menu_all', '菜单管理')},
{'get': ('menu_list', '查看菜单')},
{'post': ('menu_create', '创建菜单')},
{'put': ('menu_edit', '编辑菜单')},
{'patch': ('menu_edit', '编辑菜单')},
{'delete': ('menu_delete', '删除菜单')}
"""
perms_map = (
{'*': ('admin', '管理员')},
{'*': ('menu_all', '菜单管理')},
{'get': ('menu_list', '查看菜单')},
{'post': ('menu_create', '创建菜单')},
{'put': ('menu_edit', '编辑菜单')},
{'patch': ('menu_edit', '编辑菜单')},
{'delete': ('menu_delete', '删除菜单')}
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Charles Lai
@Contact : [email protected]
@Time : 2020/9/15 下午4:08
@FileName: views.py
@Blog :https://imaojia.com
"""
logger = logging.getLogger('drf')
DEFAULT_SESSION_TIMEOUT = None
class DataDictViewSet(CustomModelParentViewSet):
"""
数据字典视图
### 数据字典权限
{'*': ('data_all', '数据字典管理')},
{'get': ('data_list', '查看数据字典')},
{'post': ('data_create', '创建数据字典')},
{'put': ('data_edit', '编辑数据字典')},
{'patch': ('data_edit', '编辑数据字典')},
{'delete': ('data_delete', '删除数据字典')}
"""
perms_map = (
{'*': ('admin', '管理员')},
{'*': ('data_all', '数据字典管理')},
{'get': ('data_list', '查看数据字典')},
{'post': ('data_create', '创建数据字典')},
{'put': ('data_edit', '编辑数据字典')},
{'patch': ('data_edit', '编辑数据字典')},
{'delete': ('data_delete', '删除数据字典')}
)
queryset = DataDict.objects.all()
serializer_class = DataDictSerializers
filter_backends = (
django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter)
filter_fields = ('key', 'value')
search_fields = ('key', 'value')
def perform_update(self, serializer):
serializer.save()
cache.delete(f"datadict:{serializer.data['key']}:0")
cache.delete(f"datadict:{serializer.data['key']}:1")
@action(methods=['GET'], url_path='user', detail=False)
def get_user(self, request):
"""
获取用户列表
### 传递参数
force: 0|1
force为1时强制刷新
"""
_force = request.query_params.get('force', None)
position = request.query_params.get('position', None)
_key = str(
f'project:users:{self.request.user.id}-{self.request.query_params}')
try:
data = cache.get(_key)
except BaseException as e:
cache.delete(_key)
data = None
if not data or _force:
if position:
users = UserProfile.objects.exclude(
username='thirdparty').filter(position=position)
else:
users = UserProfile.objects.exclude(username='thirdparty')
data = [{'id': i.id, 'first_name': i.first_name, 'username': i.username, 'name': i.name, 'title': i.title,
'position': i.position} for i in users]
cache.set(_key, data, timeout=60 * 60 * 24)
return Response({'code': 20000, 'data': data})
@action(methods=['GET'], url_path='extra', detail=False)
def get_by_key(self, request):
"""
通过指定key名获取
参数: key
"""
key_name = request.query_params.get('key', None)
instance = self.queryset.get(key=key_name)
serializer = self.get_serializer(instance)
data = {'data': serializer.data, 'code': 20000, 'status': 'success'}
return Response(data)
class AuditLogViewSet(CustomModelViewSet):
"""
审计日志视图
### 审计日志权限
{'get': ('audit_list', '查看审计日志')}
"""
perms_map = (
{'*': ('admin', '管理员')},
{'get': ('audit_list', '查看审计日志')}
)
queryset = AuditLog.objects.all()
serializer_class = AuditLogSerializers
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,
CustomSearchFilter, OrderingFilter)
filter_class = AuditLogFilter
filter_fields = ('user', 'type', 'action', 'action_ip', 'operator')
search_fields = ('user', 'type', 'action', 'action_ip', 'content')
def create(self, request, *args, **kwargs):
pass
def update(self, request, *args, **kwargs):
pass
def destroy(self, request, *args, **kwargs):
pass
class MenuViewSet(CustomModelParentViewSet):
"""
菜单视图
### 菜单权限
{'*': ('menu_all', '菜单管理')},
{'get': ('menu_list', '查看菜单')},
{'post': ('menu_create', '创建菜单')},
{'put': ('menu_edit', '编辑菜单')},
{'patch': ('menu_edit', '编辑菜单')},
{'delete': ('menu_delete', '删除菜单')}
"""
perms_map = (
{'*': ('admin', '管理员')},
{'*': ('menu_all', '菜单管理')},
{'get': ('menu_list', '查看菜单')},
{'post': ('menu_create', '创建菜单')},
{'put': ('menu_edit', '编辑菜单')},
{'patch': ('menu_edit', '编辑菜单')},
{'delete': ('menu_delete', '删除菜单')}
) | queryset = Menu.objects.all() | 1 | 2023-12-13 03:09:32+00:00 | 12k |
AdaCheng/EgoThink | models/llava_legacy/model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "models/llava_legacy/model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "models/llava_legacy/model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "models/llava_legacy/model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, _, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, past_key_value)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "models/llava_legacy/model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "models/llava_legacy/model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "models/llava_legacy/model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "models/llava_legacy/model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "models/llava_legacy/model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "models/llava_legacy/model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "models/llava_legacy/model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "models/llava_legacy/model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "models/llava_legacy/model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from transformers.utils import logging | 9,134 | assert isinstance(prefix_mask, torch.Tensor)
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
attn_bias = attn_bias[:, :, :, -s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, tok_emb: Optional[torch.FloatTensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
raise NotImplementedError('output_attentions is not implemented yet for MPT')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert tok_emb is not None
S = tok_emb.size(1)
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=x.dtype, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, past_key_value) = torch.utils.checkpoint.checkpoint(
block,
x, past_key_value, attn_bias, attention_mask, self.is_causal
)
else:
(x, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
x = self.norm_f(x)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
logger = logging.get_logger(__name__)
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = nn.Embedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
self._attn_bias_initialized = True
if self.attn_impl == 'flash':
return (self.attn_bias, attention_mask)
if self.attn_bias is not None:
self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
attn_bias = self.attn_bias
if self.prefix_lm:
assert isinstance(attn_bias, torch.Tensor)
assert isinstance(prefix_mask, torch.Tensor)
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
attn_bias = attn_bias[:, :, :, -s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, tok_emb: Optional[torch.FloatTensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
raise NotImplementedError('output_attentions is not implemented yet for MPT')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert tok_emb is not None
S = tok_emb.size(1)
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=x.dtype, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, past_key_value) = torch.utils.checkpoint.checkpoint(
block,
x, past_key_value, attn_bias, attention_mask, self.is_causal
)
else:
(x, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
x = self.norm_f(x)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name'] | MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config) | 10 | 2023-12-05 14:17:17+00:00 | 12k |
modelscope/llmuses | llmuses/run_ms.py | [
{
"identifier": "DATASET_ID",
"path": "llmuses/benchmarks/ceval/ceval_adapter.py",
"snippet": "DATASET_ID = 'modelscope/ceval-exam'"
},
{
"identifier": "DATASET_ID",
"path": "llmuses/benchmarks/mmlu/mmlu_adapter.py",
"snippet": "DATASET_ID = 'modelscope/mmlu'"
},
{
"identifier": "DATASET_ID",
"path": "llmuses/benchmarks/hellaswag/hellaswag_adapter.py",
"snippet": "DATASET_ID = 'modelscope/hellaswag'"
},
{
"identifier": "DATASET_ID",
"path": "llmuses/benchmarks/arc/arc_adapter.py",
"snippet": "DATASET_ID = 'modelscope/ai2_arc'"
},
{
"identifier": "DATASET_ID",
"path": "llmuses/benchmarks/truthful_qa/truthful_qa_adapter.py",
"snippet": "DATASET_ID = 'modelscope/truthful_qa'"
},
{
"identifier": "DEFAULT_ROOT_CACHE_DIR",
"path": "llmuses/constants.py",
"snippet": "DEFAULT_ROOT_CACHE_DIR = '~/.cache/llmuses'"
},
{
"identifier": "Evaluator",
"path": "llmuses/evaluator/evaluator.py",
"snippet": "class Evaluator(object):\n\n \"\"\"\n The evaluator for model on datasets.\n \"\"\"\n\n def __init__(self,\n dataset_name_or_path: str,\n data_adapter: DataAdapter,\n subset_list: Optional[list] = None,\n model_adapter: Optional[BaseModelAdapter] = None,\n use_cache: bool = True,\n mem_cache_method: str = 'ttl',\n root_cache_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n outputs_dir: Optional[str] = '',\n is_custom_outputs_dir: bool = False,\n datasets_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n stage: Optional[str] = 'all',\n **kwargs):\n\n self.dataset_name_or_path = dataset_name_or_path\n self.root_cache_dir = os.path.expanduser(root_cache_dir)\n self.datasets_dir = os.path.expanduser(datasets_dir)\n self.kwargs = kwargs\n self.data_adapter = data_adapter\n self.model_adapter = model_adapter\n\n self.model_cfg = self.model_adapter.model_cfg\n self.model_id = self.model_cfg['model_id']\n self.model_revision = self.model_cfg.get('revision', None)\n self.model_revision_str = self.model_revision if self.model_revision is not None else 'none'\n\n # Get default outputs_dir\n if not is_custom_outputs_dir:\n outputs_dir = make_outputs_dir(work_dir=outputs_dir,\n model_id=self.model_id,\n model_revision=self.model_revision_str)\n\n self.outputs_dir = os.path.expanduser(outputs_dir)\n\n # Deal with the output paths\n self.outputs_structure = make_outputs_structure(self.outputs_dir)\n\n # Load dataset\n self.dataset = self.data_adapter.load(dataset_name_or_path=dataset_name_or_path,\n subset_list=subset_list,\n work_dir=self.datasets_dir,\n **kwargs)\n\n # Get prompts from dataset\n self.prompts = self.data_adapter.gen_prompts(data_dict=self.dataset)\n del self.dataset\n\n # Init memory cache\n # TODO: refactor mem cache manager\n mem_cache_file_name = self.dataset_name_or_path.replace('/', '_') + \\\n '_' + self.model_id.replace('/', '_') + \\\n '_' + self.model_revision_str + \\\n '_cache.pkl'\n self.mem_cache_path = os.path.join(self.root_cache_dir, 'mem_cache', mem_cache_file_name)\n self.use_cache = use_cache\n self.mem_cache_method = mem_cache_method\n self.mem_cache = None\n if self.use_cache:\n self.mem_cache = init_mem_cache(method=self.mem_cache_method, cache_file_path=self.mem_cache_path)\n logger.info(f'** Using memory cache with size: {len(self.mem_cache)}')\n\n def _pred_answer(self,\n input_d: dict,\n infer_cfg: dict,\n subset_name: str,\n answer_id: str = None) -> dict:\n\n # Get answer from memory cache\n if self.mem_cache is not None:\n if answer_id in self.mem_cache:\n logger.info(f'** Reusing answer `{answer_id}` in memory cache.')\n return self.mem_cache[answer_id]\n\n ans: dict = self.model_adapter.predict(inputs=input_d, infer_cfg=infer_cfg)\n ans[AnswerKeys.ANSWER_ID] = answer_id\n ans[AnswerKeys.SUBSET_NAME] = subset_name\n\n if self.mem_cache is not None:\n self.mem_cache[answer_id] = ans\n\n return ans\n\n def get_answers(self,\n subset_name: str,\n prompts_list: List[dict],\n infer_cfg: dict = None,\n debug: bool = False,\n **kwargs) -> list:\n \"\"\"\n Get answers from model inference.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n subset_name: subset name for benchmark.\n prompts_list: prompts list.\n infer_cfg: model inference config.\n Attributes:\n do_sample: bool, whether to use sampling.\n top_k: int, the number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p: float, if set to float < 1, only the most probable tokens with probabilities to add.\n temperature: float, the value used to 
module the next token probabilities.\n num_beams: int, number of beams for beam search. 1 means no beam search.\n max_length: int, the max length of the sequence to be generated.\n max_new_tokens: int, the max number of new tokens to be generated.\n repetition_penalty: float, the parameter for repetition penalty. 1.0 means no penalty.\n debug: whether to run in debug mode.\n **kwargs: kwargs.\n\n Returns: The list of answers.\n \"\"\"\n assert self.data_adapter is not None, 'data_adapter must be provided when calling func get_answers() !'\n assert self.model_adapter is not None, 'model must be provided when calling func get_answers() !'\n\n answers_list = []\n for input_prompt in tqdm(prompts_list, total=len(prompts_list), desc=f'Predicting({subset_name}): '):\n\n # Gen answer_id (concat: model_cfg + input_prompt + infer_cfg)\n model_cfg_str = json.dumps(\n OrderedDict(sorted(dict_torch_dtype_to_str(self.model_adapter.model_cfg).items())),\n ensure_ascii=False)\n input_prompt_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(input_prompt).items())),\n ensure_ascii=False)\n infer_cfg_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(infer_cfg).items())),\n ensure_ascii=False)\n answer_id = 'answer-' + gen_hash(model_cfg_str + input_prompt_str + infer_cfg_str)\n\n # Get answers\n answer_d: dict = self._pred_answer(input_d=input_prompt,\n infer_cfg=infer_cfg,\n subset_name=subset_name,\n answer_id=answer_id)\n\n answer_d[AnswerKeys.MODEL_SPEC] = self.model_adapter.model_cfg\n answer_d[AnswerKeys.RAW_INPUT] = input_prompt[AnswerKeys.RAW_INPUT]\n answer_d[AnswerKeys.ORIGIN_PROMPT] = input_prompt\n\n if debug:\n logger.debug(f'**input_prompt: {json.dumps(input_prompt, ensure_ascii=False)} \\n')\n logger.debug(f'**predicted ans: {json.dumps(answer_d, ensure_ascii=False)} \\n')\n\n answers_list.append(answer_d)\n\n # Dump answers\n pred_dir: str = self.outputs_structure.get(OutputsStructure.PREDICTIONS_DIR)\n pred_file_name: str = self.dataset_name_or_path.replace('/', '_') + '_' + subset_name + '.jsonl'\n os.makedirs(pred_dir, exist_ok=True)\n dump_jsonl_data(answers_list, os.path.join(pred_dir, pred_file_name))\n\n return answers_list\n\n def _get_review(self,\n answer_d: dict,\n review_id: str = None,\n reviewer_spec: dict = None) -> dict:\n\n # Get review from memory cache\n if self.mem_cache is not None:\n if review_id in self.mem_cache:\n logger.info(f'** Reusing review `{review_id}` in memory cache.')\n return self.mem_cache[review_id]\n\n if reviewer_spec is None:\n reviewer_spec = {}\n\n review_res = deepcopy(answer_d)\n choices = review_res[AnswerKeys.CHOICES]\n if len(choices) == 0:\n review_res[ReviewKeys.REVIEWED] = False\n review_res[ReviewKeys.REVIEW_ID] = None\n review_res[ReviewKeys.REVIEWER_SPEC] = reviewer_spec\n review_res[ReviewKeys.REVIEW_TIME] = time.time()\n return review_res\n\n rev_choices = []\n for choice in choices:\n raw_input_d: dict = review_res[AnswerKeys.RAW_INPUT]\n answer_content = choice[ReviewKeys.MESSAGE][ReviewKeys.CONTENT]\n answer_content = self.data_adapter.parse_pred_result(answer_content, raw_input_d)\n gold_content = self.data_adapter.get_gold_answer(raw_input_d)\n\n review_result = self.data_adapter.match(gold_content, answer_content)\n choice[ReviewKeys.REVIEW] = {ReviewKeys.GOLD: gold_content,\n ReviewKeys.PRED: answer_content,\n ReviewKeys.RESULT: review_result}\n\n rev_choices.append(choice)\n\n review_res[AnswerKeys.CHOICES] = rev_choices\n review_res[ReviewKeys.REVIEWED] = True\n review_res[ReviewKeys.REVIEW_ID] = review_id\n 
review_res[ReviewKeys.REVIEWER_SPEC] = reviewer_spec\n review_res[ReviewKeys.REVIEW_TIME] = time.time()\n\n if self.mem_cache is not None:\n self.mem_cache[review_id] = review_res\n\n return review_res\n\n def get_reviews(self, subset_name: str, answers_list: List[dict], debug: bool = False, **kwargs) -> list:\n \"\"\"\n Get reviews from answers.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n subset_name: subset name of benchmark\n answers_list: inference results list.\n debug: whether to run in debug mode.\n **kwargs: kwargs.\n\n Returns: reviews list.\n \"\"\"\n reviews_list = []\n for answer_d in tqdm(answers_list, total=len(answers_list), desc=f'Reviewing({subset_name}): '):\n\n # Gen review_id (concat: answer_id + reviewer_spec)\n answer_id = answer_d[AnswerKeys.ANSWER_ID]\n\n reviewer_spec: dict = {'metric': [metric_d['name'] for metric_d in self.data_adapter.metric_list],\n 'reviewer': ['Evaluator'],\n 'revision': ['default']}\n reviewer_spec_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(reviewer_spec).items())),\n ensure_ascii=False)\n review_id = 'review-' + gen_hash(answer_id + reviewer_spec_str)\n\n # Get review\n review_d = self._get_review(answer_d=answer_d, review_id=review_id, reviewer_spec=reviewer_spec)\n\n if debug:\n logger.debug(review_d)\n\n reviews_list.append(review_d)\n\n # Dump reviews\n review_dir: str = self.outputs_structure.get(OutputsStructure.REVIEWS_DIR)\n review_file_name: str = self.dataset_name_or_path.replace('/', '_') + '_' + subset_name + '.jsonl'\n os.makedirs(review_dir, exist_ok=True)\n dump_jsonl_data(reviews_list, os.path.join(review_dir, review_file_name))\n\n return reviews_list\n\n def compute_metrics(self, reviews_list: List[dict]) -> Any:\n \"\"\"\n To compute metrics from reviews_list for each subset.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n reviews_list: reviews list.\n\n Returns:\n The metric result. Depends on the metric function in data_adapter.\n \"\"\"\n\n review_res_list = []\n for review_d in reviews_list:\n if not review_d[ReviewKeys.REVIEWED]:\n logger.warning(f'** Review not finished for answer_id: {review_d[AnswerKeys.ANSWER_ID]}')\n continue\n\n review_res = review_d[AnswerKeys.CHOICES][0][ReviewKeys.REVIEW][ReviewKeys.RESULT]\n review_res_list.append(review_res)\n\n metric_score: Union[float, dict] = self.data_adapter.compute_metric(review_res_list=review_res_list)\n\n return metric_score\n\n def dump_report(self, report_map: dict, use_table: bool = True):\n \"\"\"\n Get report for total reviews of specific dataset.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n report_map: report dict. Generated by func self.data_adapter.gen_report().\n use_table: whether to generate table for reports. 
Default to True.\n\n Returns: None\n \"\"\"\n\n # Dump report\n report_dir: str = self.outputs_structure[OutputsStructure.REPORTS_DIR]\n report_file_name: str = self.dataset_name_or_path.replace('/', '_') + '.json'\n os.makedirs(report_dir, exist_ok=True)\n report_path: str = os.path.join(report_dir, report_file_name)\n with open(report_path, 'w') as f:\n f.write(json.dumps(report_map, ensure_ascii=False, indent=4))\n # logger.info(f'** Dump report to {report_path} \\n')\n logger.info(f'** Dump report: {report_file_name} \\n')\n\n if use_table:\n try:\n # Make table\n report_table: str = gen_table([report_dir])\n logger.info(f'** Report table: \\n {report_table} \\n')\n except:\n logger.error('Failed to generate report table.')\n\n def save_cache(self):\n if self.mem_cache is not None:\n logger.info(f'** Saving memory cache with size: {len(self.mem_cache)}')\n Cache.save(cache=self.mem_cache, path=self.mem_cache_path)\n\n def clear_cache(self):\n \"\"\"\n Clear memory cache.\n\n Returns: None\n \"\"\"\n if self.mem_cache is not None:\n cache_len = len(self.mem_cache)\n self.mem_cache.clear()\n logger.info(f'** Memory cache cleared, length changed: {cache_len} -> {len(self.mem_cache)}')\n\n def eval(self,\n infer_cfg: dict = None,\n debug: bool = False,\n **kwargs):\n \"\"\"\n Evaluate the model on the specific benchmark. Streaming & parallel mode is supported.\n It is required to rewrite this method to support your own evaluator.\n\n The evaluation process is as follows:\n 1. Get the input samples from the dataset (benchmarks on the ModelScope or HuggingFace).\n 2. Get the input prompts from dataset with specific data adapter.\n 3. Get answers with model inference.\n 4. Get reviews with metric function (or reviewers).\n 5. Generate report from review results.\n\n Args:\n infer_cfg: The config for model inference.\n debug: Whether to run in debug mode. Default: False.\n\n Returns:\n None.\n \"\"\"\n\n logger.info(f'**** Start evaluating on dataset {self.dataset_name_or_path} ****')\n\n reviews_map_all = {} # {subset_name: (score, num)}\n for subset_name, prompts_list in self.prompts.items():\n limit = infer_cfg.get('limit', len(prompts_list))\n prompts_list = prompts_list[:limit]\n\n answers_list: list = self.get_answers(subset_name=subset_name,\n prompts_list=prompts_list,\n infer_cfg=infer_cfg,\n debug=debug,\n **kwargs)\n\n reviews_list: list = self.get_reviews(subset_name=subset_name,\n answers_list=answers_list,\n debug=debug,\n **kwargs)\n\n metric_res = self.compute_metrics(reviews_list=reviews_list)\n reviews_map_all[subset_name] = (metric_res, len(reviews_list))\n\n # Generate report\n report_map: dict = self.data_adapter.gen_report(subset_score_map=reviews_map_all)\n self.dump_report(report_map=report_map)\n\n self.save_cache()\n self.clear_cache()\n\n logger.info(f'\\n**** Evaluation finished on {self.dataset_name_or_path} ****\\n')"
},
{
"identifier": "MultiChoiceModelAdapter",
"path": "llmuses/models/model_adapter.py",
"snippet": "class MultiChoiceModelAdapter(BaseModelAdapter):\n \"\"\" The multi-choice model adapter. \"\"\"\n\n _DEFAULT_MAX_LENGTH = 2048\n\n def __init__(self,\n model_id: str,\n device_map: str = 'auto',\n torch_dtype: dtype = torch.bfloat16,\n model_revision: str = None,\n max_length: int = None,\n **kwargs):\n \"\"\"\n Args:\n model_id: The model id on ModelScope, or local model_dir. TODO: torch.nn.module to be supported.\n device_map: The device map for model inference.\n torch_dtype: The torch dtype for model inference. Default: torch.bfloat16.\n model_revision: The model revision on ModelScope. Default: None.\n max_length: The max length of input sequence. Default: None.\n **kwargs: Other args.\n \"\"\"\n\n self.model_id: str = model_id\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n torch_dtype = torch_dtype if torch_dtype is not None else 'auto'\n\n model_cfg: dict = dict()\n model_cfg['model_id'] = model_id\n model_cfg['device_map'] = device_map\n model_cfg['torch_dtype'] = str(torch_dtype)\n\n from modelscope.utils.hf_util import AutoModelForCausalLM, AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(self.model_id,\n revision=model_revision,\n trust_remote_code=True,)\n\n model = AutoModelForCausalLM.from_pretrained(self.model_id,\n revision=model_revision,\n device_map=device_map,\n trust_remote_code=True,\n torch_dtype=torch_dtype,)\n\n # model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)\n\n super().__init__(model=model, tokenizer=tokenizer, model_cfg=model_cfg)\n\n self._max_length = max_length\n\n @property\n def max_length(self):\n if self._max_length:\n return self._max_length\n seqlen_config_attrs = ('n_positions', 'max_position_embeddings', 'n_ctx')\n for attr in seqlen_config_attrs:\n if hasattr(self.model.config, attr):\n return getattr(self.model.config, attr)\n if hasattr(self.tokenizer, 'model_max_length'):\n if self.tokenizer.model_max_length == 1000000000000000019884624838656:\n return self._DEFAULT_MAX_LENGTH\n return self.tokenizer.model_max_length\n return self._DEFAULT_MAX_LENGTH\n\n @torch.no_grad()\n def predict(self, inputs: dict, infer_cfg: dict = None) -> dict:\n \"\"\"\n Multi-choice model prediction func.\n\n Args:\n inputs (dict): The inputs for a doc. Format:\n {'data': [full_prompt], 'multi_choices': ['A', 'B', 'C', 'D']}\n\n infer_cfg (dict): inference configuration.\n\n Returns:\n res (dict): The model prediction results. 
Format:\n {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': [-14.9609, -13.6015, ...], # loglikelihood values for inputs context-continuation pairs.\n 'role': 'assistant'\n }\n }\n ],\n 'created': 1677664795,\n # For models on the ModelScope or HuggingFace, concat model_id and revision with \"-\".\n 'model': 'gpt-3.5-turbo-0613',\n 'object': 'chat.completion',\n 'usage': {\n 'completion_tokens': 17,\n 'prompt_tokens': 57,\n 'total_tokens': 74\n }\n }\n \"\"\"\n\n # TODO: unused\n if infer_cfg is None:\n infer_cfg = {'do_sample': True, 'max_length': 1024}\n\n input_data = inputs['data']\n multi_choices = inputs['multi_choices']\n\n output, input_info = self._get_logits(self.tokenizer, self.model, input_data)\n assert output.shape[0] == 1\n logits = output.flatten()\n\n choice_logits = [logits[self.tokenizer(ch)['input_ids'][-1:]] for ch in multi_choices]\n softval = torch.nn.functional.softmax(torch.tensor(choice_logits).float(), dim=0)\n\n if softval.dtype in {torch.bfloat16, torch.float16}:\n softval = softval.to(dtype=torch.float32)\n probs = softval.detach().cpu().numpy()\n pred: str = multi_choices[int(np.argmax(probs))] # Format: A or B or C or D\n\n res_d = {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': pred,\n 'role': 'assistant'\n }\n }\n ],\n 'created': time.time(),\n 'model': self.model_id,\n 'object': 'chat.completion',\n 'usage': {}\n }\n\n return res_d\n\n @staticmethod\n def _get_logits(tokenizer, model, inputs: List[str]):\n input_ids = tokenizer(inputs, padding=False)['input_ids']\n input_ids = torch.tensor(input_ids, device=model.device)\n tokens = {'input_ids': input_ids}\n\n outputs = model(input_ids)['logits']\n logits = outputs[:, -1, :]\n log_probs = torch.nn.functional.softmax(logits, dim=-1)\n return log_probs, {'tokens': tokens}"
},
{
"identifier": "ContinuationLogitsModelAdapter",
"path": "llmuses/models/model_adapter.py",
"snippet": "class ContinuationLogitsModelAdapter(MultiChoiceModelAdapter):\n\n def __init__(self,\n model_id: str,\n device_map: str = 'auto',\n torch_dtype: dtype = torch.bfloat16,\n model_revision: str = None,\n **kwargs):\n \"\"\"\n Continuation-logits model adapter.\n\n Args:\n model_id: The model id on ModelScope, or local model_dir.\n device_map: The device map for model inference.\n torch_dtype: The torch dtype for model inference. Default: torch.bfloat16.\n model_revision: The model revision on ModelScope. Default: None.\n **kwargs: Other args.\n \"\"\"\n\n super().__init__(model_id=model_id,\n device_map=device_map,\n torch_dtype=torch_dtype,\n model_revision=model_revision,\n **kwargs)\n\n @torch.no_grad()\n def predict(self, inputs: dict, infer_cfg: dict = None) -> dict:\n \"\"\"\n Multi-choice model prediction func.\n Args:\n inputs (dict): The inputs for a doc. Format:\n {'data': [(context, continuation), ...]}\n infer_cfg (dict): inference configuration.\n Returns:\n res (dict): The model prediction results. Format:\n {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': [-14.9609, -13.6015, ...], # loglikelihood values for inputs context-continuation pairs.\n 'role': 'assistant'\n }\n }\n ],\n 'created': 1677664795,\n # For models on the ModelScope or HuggingFace, concat model_id and revision with \"-\".\n 'model': 'gpt-3.5-turbo-0613',\n 'object': 'chat.completion',\n 'usage': {\n 'completion_tokens': 17,\n 'prompt_tokens': 57,\n 'total_tokens': 74\n }\n }\n \"\"\"\n if infer_cfg is None:\n infer_cfg = {'do_sample': True, 'max_length': 2048}\n\n pred_list: list = self.loglikelihood(inputs=inputs['data'], infer_cfg=infer_cfg)\n\n res_d = {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': pred_list,\n 'role': 'assistant'\n }\n }\n ],\n 'created': time.time(),\n 'model': self.model_id,\n 'object': 'chat.completion',\n 'usage': {}\n }\n return res_d\n\n def loglikelihood(self, inputs: list, infer_cfg: dict = None) -> list:\n # To predict one doc\n doc_ele_pred = []\n for ctx, continuation in inputs:\n\n # ctx_enc shape: [context_tok_len] cont_enc shape: [continuation_tok_len]\n ctx_enc, cont_enc = self._encode_pair(ctx, continuation)\n\n inputs_tokens = torch.tensor(\n (ctx_enc.tolist() + cont_enc.tolist())[-(self.max_length + 1):][:-1],\n dtype=torch.long,\n device=self.model.device).unsqueeze(0)\n\n logits = self.model(inputs_tokens)[0]\n logits = torch.nn.functional.log_softmax(logits.float(), dim=-1)\n\n logits = logits[:, -len(cont_enc):, :]\n cont_enc = cont_enc.unsqueeze(0).unsqueeze(-1)\n logits = torch.gather(logits.cpu(), 2, cont_enc.cpu()).squeeze(-1)\n\n choice_score = float(logits.sum())\n doc_ele_pred.append(choice_score)\n\n # e.g. [-2.3, -9.2, -12.9, 1.1], length=len(choices)\n return doc_ele_pred\n\n def _encode_pair(self, context, continuation):\n n_spaces = len(context) - len(context.rstrip())\n if n_spaces > 0:\n continuation = context[-n_spaces:] + continuation\n context = context[:-n_spaces]\n\n whole_enc = self.tokenizer(context + continuation, padding=False)['input_ids']\n whole_enc = torch.tensor(whole_enc, device=self.device)\n\n context_enc = self.tokenizer(context, padding=False)['input_ids']\n context_enc = torch.tensor(context_enc, device=self.device)\n\n context_enc_len = len(context_enc)\n continuation_enc = whole_enc[context_enc_len:]\n\n return context_enc, continuation_enc"
},
{
"identifier": "get_logger",
"path": "llmuses/utils/logger.py",
"snippet": "def get_logger(log_file: Optional[str] = None,\n log_level: int = logging.INFO,\n file_mode: str = 'w'):\n \"\"\" Get logging logger\n\n Args:\n log_file: Log filename, if specified, file handler will be added to\n logger\n log_level: Logging level.\n file_mode: Specifies the mode to open the file, if filename is\n specified (if filemode is unspecified, it defaults to 'w').\n \"\"\"\n\n logger_name = __name__.split('.')[0]\n logger = logging.getLogger(logger_name)\n\n if logger_name in init_loggers:\n add_file_handler_if_needed(logger, log_file, file_mode, log_level)\n return logger\n\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if log_file is not None:\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n logger.setLevel(log_level)\n\n init_loggers[logger_name] = True\n\n return logger"
}
] | import argparse
import torch
from llmuses.benchmarks.ceval import DATASET_ID as CEVAL_EXAM
from llmuses.benchmarks.mmlu import DATASET_ID as MMLU
from llmuses.benchmarks.hellaswag import DATASET_ID as HELLA_SWAG
from llmuses.benchmarks.arc import DATASET_ID as ARC
from llmuses.benchmarks.truthful_qa import DATASET_ID as TRUTHFUL_QA
from llmuses.constants import DEFAULT_ROOT_CACHE_DIR
from llmuses.evaluator import Evaluator
from llmuses.models.model_adapter import MultiChoiceModelAdapter, ContinuationLogitsModelAdapter
from llmuses.utils.logger import get_logger
from llmuses.models.dummy_chat_model import DummyChatModel
from llmuses.benchmarks.ceval import CEVALAdapter
from llmuses.benchmarks.mmlu import MMLUAdapter
from llmuses.benchmarks.arc import ARCAdapter
from llmuses.benchmarks.hellaswag import HellaSwagAdapter
from llmuses.benchmarks.truthful_qa import TruthfulQaAdapter | 7,391 | # Copyright (c) Alibaba, Inc. and its affiliates.
# flake8: noqa
logger = get_logger()
# TODO: add more precision
MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16}
"""
Run evaluation process for ModelScope Leaderboard.
"""
def parse_args():
parser = argparse.ArgumentParser(description='Run evaluation on a model')
parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True)
parser.add_argument('--revision', help='Model revision.', required=False, default=None)
parser.add_argument('--precision', help='Model precision.', default='bf16')
parser.add_argument('--work-dir', help='root work cache dir.', default=None)
parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs')
parser.add_argument('--datasets-dir', help='Datasets dir.', default=DEFAULT_ROOT_CACHE_DIR)
parser.add_argument('--device-map', help='device map.', default='auto')
parser.add_argument('--max-eval-size', type=int, help='Max evaluation samples num for each subset', default=None)
parser.add_argument('--dataset-id', help='Dataset id on modelscope', required=False, default=None)
parser.add_argument('--debug',
help='Debug mode, will print information for debugging.',
action='store_true',
default=False)
parser.add_argument('--dry-run',
help='Dry run in single processing mode.',
action='store_true',
default=False)
parser.add_argument('--mem-cache',
help='To use memory cache or not.',
action='store_true',
default=False)
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.info(args)
# Customize your target datasets here
all_benchmarks = [CEVAL_EXAM, MMLU, ARC, HELLA_SWAG, TRUTHFUL_QA]
dataset_id = args.dataset_id
if dataset_id is None:
datasets = all_benchmarks
elif dataset_id in all_benchmarks:
datasets = [dataset_id]
else:
raise ValueError(f'Unknown dataset: {dataset_id}, Supported datasets: {all_benchmarks}')
# Get model instance
if args.dry_run:
model_adapter = DummyChatModel(model_cfg=dict()) # TODO
model_id: str = 'dummy'
model_revision: str = 'v1.0.0'
model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16)
else:
model_id: str = args.model
model_revision: str = args.revision
model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16)
| # Copyright (c) Alibaba, Inc. and its affiliates.
# flake8: noqa
logger = get_logger()
# TODO: add more precision
MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16}
"""
Run evaluation process for ModelScope Leaderboard.
"""
def parse_args():
parser = argparse.ArgumentParser(description='Run evaluation on a model')
parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True)
parser.add_argument('--revision', help='Model revision.', required=False, default=None)
parser.add_argument('--precision', help='Model precision.', default='bf16')
parser.add_argument('--work-dir', help='root work cache dir.', default=None)
parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs')
parser.add_argument('--datasets-dir', help='Datasets dir.', default=DEFAULT_ROOT_CACHE_DIR)
parser.add_argument('--device-map', help='device map.', default='auto')
parser.add_argument('--max-eval-size', type=int, help='Max evaluation samples num for each subset', default=None)
parser.add_argument('--dataset-id', help='Dataset id on modelscope', required=False, default=None)
parser.add_argument('--debug',
help='Debug mode, will print information for debugging.',
action='store_true',
default=False)
parser.add_argument('--dry-run',
help='Dry run in single processing mode.',
action='store_true',
default=False)
parser.add_argument('--mem-cache',
help='To use memory cache or not.',
action='store_true',
default=False)
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.info(args)
# Customize your target datasets here
all_benchmarks = [CEVAL_EXAM, MMLU, ARC, HELLA_SWAG, TRUTHFUL_QA]
dataset_id = args.dataset_id
if dataset_id is None:
datasets = all_benchmarks
elif dataset_id in all_benchmarks:
datasets = [dataset_id]
else:
raise ValueError(f'Unknown dataset: {dataset_id}, Supported datasets: {all_benchmarks}')
# Get model instance
if args.dry_run:
model_adapter = DummyChatModel(model_cfg=dict()) # TODO
model_id: str = 'dummy'
model_revision: str = 'v1.0.0'
model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16)
else:
model_id: str = args.model
model_revision: str = args.revision
model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16)
| model_adapter = MultiChoiceModelAdapter(model_id=model_id, | 7 | 2023-12-07 06:10:49+00:00 | 12k |
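The record above ends just before the model adapter is constructed. As a small, hedged illustration of the two pieces of logic that are fully visible in it, the precision-string-to-dtype mapping and the benchmark selection, the sketch below restates them as a self-contained Python snippet; the helper name resolve_datasets and the example benchmark ids are illustrative assumptions, not part of the original script.

import torch

# Mirrors MODEL_PRECISION_MAP above: unknown precision strings fall back to bfloat16.
PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16}

def resolve_datasets(dataset_id, all_benchmarks):
    # Same branching as in main(): None selects every benchmark, a known id selects
    # exactly one, and anything else raises an error.
    if dataset_id is None:
        return list(all_benchmarks)
    if dataset_id in all_benchmarks:
        return [dataset_id]
    raise ValueError(f'Unknown dataset: {dataset_id}, Supported datasets: {all_benchmarks}')

# Usage sketch (illustrative ids only):
dtype = PRECISION_MAP.get('fp8', torch.bfloat16)             # falls back to torch.bfloat16
datasets = resolve_datasets(None, ['ceval', 'mmlu', 'arc'])  # returns all three ids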
liujin112/PortraitDiffusion | main.py | [
{
"identifier": "MasaCtrlPipeline",
"path": "utils/pipeline.py",
"snippet": "class MasaCtrlPipeline(StableDiffusionPipeline):\n\n def next_step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta=0.,\n verbose=False\n ):\n \"\"\"\n Inverse sampling for DDIM Inversion\n \"\"\"\n if verbose:\n print(\"timestep: \", timestep)\n next_step = timestep\n timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output\n x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir\n return x_next, pred_x0\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta: float=0.0,\n verbose=False,\n ):\n \"\"\"\n predict the sampe the next step in the denoise process.\n \"\"\"\n prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output\n x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir\n return x_prev, pred_x0\n\n @torch.no_grad()\n def image2latent(self, image):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE)\n # input image density range [-1, 1]\n latents = self.vae.encode(image)['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n @torch.no_grad()\n def latent2image(self, latents, return_type='np'):\n latents = 1 / 0.18215 * latents.detach()\n image = self.vae.decode(latents)['sample']\n if return_type == 'np':\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()[0]\n image = (image * 255).astype(np.uint8)\n elif return_type == \"pt\":\n image = (image / 2 + 0.5).clamp(0, 1)\n\n return image\n\n def latent2image_grad(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents)['sample']\n\n return image # range [-1, 1]\n\n @torch.no_grad()\n def __call__(\n self,\n prompt,\n batch_size=1,\n height=512,\n width=512,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n latents=None,\n unconditioning=None,\n neg_prompt=None,\n ref_intermediate_latents=None,\n return_intermediates=False,\n lcm_lora=False,\n de_bug=False,\n **kwds):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if isinstance(prompt, list):\n batch_size = len(prompt)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # if kwds.get(\"dir\"):\n # dir = text_embeddings[-2] - text_embeddings[-1]\n # u, s, v = 
torch.pca_lowrank(dir.transpose(-1, -2), q=1, center=True)\n # text_embeddings[-1] = text_embeddings[-1] + kwds.get(\"dir\") * v\n # print(u.shape)\n # print(v.shape)\n\n # define initial latents\n latents_shape = (batch_size, self.unet.config.in_channels, height//8, width//8)\n if latents is None:\n latents = torch.randn(latents_shape, device=DEVICE)\n else:\n assert latents.shape == latents_shape, f\"The shape of input latent tensor {latents.shape} should equal to predefined one.\"\n\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n if neg_prompt:\n uc_text = neg_prompt\n else:\n uc_text = \"\"\n # uc_text = \"ugly, tiling, poorly drawn hands, poorly drawn feet, body out of frame, cut off, low contrast, underexposed, distorted face\"\n unconditional_input = self.tokenizer(\n [uc_text] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n # unconditional_input.input_ids = unconditional_input.input_ids[:, 1:]\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # iterative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n # print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n latents_list = [latents]\n pred_x0_list = [latents]\n if de_bug:\n import pdb;pdb.set_trace()\n for i, t in enumerate(tqdm(self.scheduler.timesteps, desc=\"DDIM Sampler\")):\n if ref_intermediate_latents is not None:\n # note that the batch_size >= 2\n latents_ref = ref_intermediate_latents[-1 - i]\n _, latents_cur = latents.chunk(2)\n latents = torch.cat([latents_ref, latents_cur])\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n if unconditioning is not None and isinstance(unconditioning, list):\n _, text_embeddings = text_embeddings.chunk(2)\n text_embeddings = torch.cat([unconditioning[i].expand(*text_embeddings.shape), text_embeddings]) \n # predict tghe noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t -> x_t-1\n if lcm_lora:\n latents, pred_x0 = self.scheduler.step(noise_pred, t, latents, return_dict=False)\n else:\n latents, pred_x0 = self.step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n image = self.latent2image(latents, return_type=\"pt\")\n if return_intermediates:\n pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n latents_list = [self.latent2image(img, return_type=\"pt\") for img in latents_list]\n return image, pred_x0_list, latents_list\n return image\n\n @torch.no_grad()\n def invert(\n self,\n image: torch.Tensor,\n prompt,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n return_intermediates=False,\n **kwds):\n \"\"\"\n invert a real image into noise map with determinisc DDIM inversion\n \"\"\"\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n batch_size = image.shape[0]\n if isinstance(prompt, list):\n if batch_size == 1:\n image = image.expand(len(prompt), -1, -1, -1)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * 
batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # define initial latents\n latents = self.image2latent(image)\n start_latents = latents\n # print(latents)\n # exit()\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n unconditional_input = self.tokenizer(\n [\"\"] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # interative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n # print(\"attributes: \", self.scheduler.__dict__)\n latents_list = [latents]\n pred_x0_list = [latents]\n for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc=\"DDIM Inversion\")):\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n\n # predict the noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t-1 -> x_t\n latents, pred_x0 = self.next_step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n if return_intermediates:\n # return the intermediate laters during inversion\n # pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n return latents, latents_list\n return latents, start_latents"
},
{
"identifier": "AttentionBase",
"path": "utils/masactrl_utils.py",
"snippet": "class AttentionBase:\n def __init__(self):\n self.cur_step = 0\n self.num_att_layers = -1\n self.cur_att_layer = 0\n\n def after_step(self):\n pass\n\n def __call__(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = self.forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n self.cur_att_layer += 1\n if self.cur_att_layer == self.num_att_layers:\n self.cur_att_layer = 0\n self.cur_step += 1\n # after step\n self.after_step()\n return out\n\n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = torch.einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=num_heads)\n return out\n\n def reset(self):\n self.cur_step = 0\n self.cur_att_layer = 0"
},
{
"identifier": "regiter_attention_editor_diffusers",
"path": "utils/masactrl_utils.py",
"snippet": "def regiter_attention_editor_diffusers(model, editor: AttentionBase):\n \"\"\"\n Register a attention editor to Diffuser Pipeline, refer from [Prompt-to-Prompt]\n \"\"\"\n def ca_forward(self, place_in_unet):\n def forward(x, encoder_hidden_states=None, attention_mask=None, context=None, mask=None):\n \"\"\"\n The attention is similar to the original implementation of LDM CrossAttention class\n except adding some modifications on the attention\n \"\"\"\n if encoder_hidden_states is not None:\n context = encoder_hidden_states\n if attention_mask is not None:\n mask = attention_mask\n\n to_out = self.to_out\n if isinstance(to_out, nn.modules.container.ModuleList):\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n h = self.heads\n q = self.to_q(x)\n is_cross = context is not None\n context = context if is_cross else x\n k = self.to_k(context)\n v = self.to_v(context)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if mask is not None:\n mask = rearrange(mask, 'b ... -> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n mask = mask[:, None, :].repeat(h, 1, 1)\n sim.masked_fill_(~mask, max_neg_value)\n\n attn = sim.softmax(dim=-1)\n # the only difference\n out = editor(\n q, k, v, sim, attn, is_cross, place_in_unet,\n self.heads, scale=self.scale)\n\n return to_out(out)\n\n return forward\n\n def register_editor(net, count, place_in_unet):\n for name, subnet in net.named_children():\n if net.__class__.__name__ == 'Attention': # spatial Transformer layer\n net.forward = ca_forward(net, place_in_unet)\n return count + 1\n elif hasattr(net, 'children'):\n count = register_editor(subnet, count, place_in_unet)\n return count\n\n cross_att_count = 0\n for net_name, net in model.unet.named_children():\n if \"down\" in net_name:\n cross_att_count += register_editor(net, 0, \"down\")\n elif \"mid\" in net_name:\n cross_att_count += register_editor(net, 0, \"mid\")\n elif \"up\" in net_name:\n cross_att_count += register_editor(net, 0, \"up\")\n editor.num_att_layers = cross_att_count"
},
{
"identifier": "MaskPromptedStyleAttentionControl",
"path": "utils/style_attn_control.py",
"snippet": "class MaskPromptedStyleAttentionControl(AttentionBase):\n def __init__(self, start_step=4, start_layer=10, style_attn_step=35, layer_idx=None, step_idx=None, total_steps=50, style_guidance=0.1, \n only_masked_region=False, guidance=0.0, \n style_mask=None, source_mask=None, de_bug=False):\n \"\"\"\n MaskPromptedSAC\n Args:\n start_step: the step to start mutual self-attention control\n start_layer: the layer to start mutual self-attention control\n layer_idx: list of the layers to apply mutual self-attention control\n step_idx: list the steps to apply mutual self-attention control\n total_steps: the total number of steps\n thres: the thereshold for mask thresholding\n ref_token_idx: the token index list for cross-attention map aggregation\n cur_token_idx: the token index list for cross-attention map aggregation\n mask_save_dir: the path to save the mask image\n \"\"\"\n\n super().__init__()\n self.total_steps = total_steps\n self.total_layers = 16\n self.start_step = start_step\n self.start_layer = start_layer\n self.layer_idx = layer_idx if layer_idx is not None else list(range(start_layer, self.total_layers))\n self.step_idx = step_idx if step_idx is not None else list(range(start_step, total_steps))\n print(\"using MaskPromptStyleAttentionControl\")\n print(\"MaskedSAC at denoising steps: \", self.step_idx)\n print(\"MaskedSAC at U-Net layers: \", self.layer_idx)\n \n self.de_bug = de_bug\n self.style_guidance = style_guidance\n self.only_masked_region = only_masked_region\n self.style_attn_step = style_attn_step\n self.self_attns = []\n self.cross_attns = []\n self.guidance = guidance\n self.style_mask = style_mask\n self.source_mask = source_mask\n\n\n def after_step(self):\n self.self_attns = []\n self.cross_attns = []\n\n def attn_batch(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n \n if q_mask is not None:\n sim = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n if k_mask is not None:\n sim = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n attn = sim.softmax(-1) if attn is None else attn\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def attn_batch_fg_bg(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n if q_mask is not None:\n sim_fg = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(q_mask.unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n if k_mask is not None:\n sim_fg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n sim = torch.cat([sim_fg, sim_bg])\n 
attn = sim.softmax(-1)\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n\n \"\"\"\n Attention forward function\n \"\"\"\n \n if is_cross or self.cur_step not in self.step_idx or self.cur_att_layer // 2 not in self.layer_idx:\n return super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n\n B = q.shape[0] // num_heads // 2\n H = W = int(np.sqrt(q.shape[1]))\n \n if self.style_mask is not None and self.source_mask is not None:\n #mask = self.aggregate_cross_attn_map(idx=self.cur_token_idx) # (4, H, W)\n heigh, width = self.style_mask.shape[-2:]\n mask_style = self.style_mask# (H, W)\n mask_source = self.source_mask# (H, W)\n scale = int(np.sqrt(heigh * width / q.shape[1]))\n # res = int(np.sqrt(q.shape[1]))\n spatial_mask_source = F.interpolate(mask_source, (heigh//scale, width//scale)).reshape(-1, 1)\n spatial_mask_style = F.interpolate(mask_style, (heigh//scale, width//scale)).reshape(-1, 1)\n \n else:\n spatial_mask_source=None\n spatial_mask_style=None\n\n if spatial_mask_style is None or spatial_mask_source is None:\n \n out_s,out_c,out_t = self.style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n \n else:\n if self.only_masked_region:\n out_s,out_c,out_t = self.mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n else:\n out_s,out_c,out_t = self.separate_mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n\n out = torch.cat([out_s,out_c,out_t],dim=0) \n return out\n \n\n def style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n qs, qc, qt = q.chunk(3)\n\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n\n if self.cur_step < self.style_attn_step:\n out_t = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n else:\n out_t = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c + (out_t - out_c) * self.style_guidance\n return out_s,out_c,out_t\n\n def mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n qs, qc, qt = q.chunk(3)\n \n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], attn[num_heads: 2*num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n out_c_new = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 
2*num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n\n if self.cur_step < self.style_attn_step:\n out_t = out_c #self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n else:\n out_t_fg = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n \n out_t = out_t * spatial_mask_source + out_c * (1 - spatial_mask_source)\n\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n # print(torch.sum(out_t* (1 - spatial_mask_source) - out_c * (1 - spatial_mask_source)))\n return out_s,out_c,out_t\n\n def separate_mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n # To prevent query confusion, render fg and bg according to mask.\n qs, qc, qt = q.chunk(3)\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.cur_step < self.style_attn_step: \n \n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg,out_c_bg = out_c.chunk(2)\n out_t = out_c_fg * spatial_mask_source + out_c_bg * (1 - spatial_mask_source)\n\n else:\n out_t = self.attn_batch_fg_bg(qt, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_t_fg,out_t_bg = out_t.chunk(2)\n out_c_fg,out_c_bg = out_c.chunk(2)\n if self.style_guidance>=0:\n out_t_fg = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n out_t_bg = out_c_bg + (out_t_bg - out_c_bg) * self.style_guidance \n out_t = out_t_fg * spatial_mask_source + out_t_bg * (1 - spatial_mask_source)\n \n return out_s,out_t,out_t"
}
] | import os
import sys
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import numpy as np
from tqdm import tqdm
from diffusers import DDIMScheduler,LCMScheduler
from torchvision.utils import save_image
from torchvision.io import read_image
from PIL import Image
from utils.pipeline import MasaCtrlPipeline
from utils.masactrl_utils import AttentionBase, regiter_attention_editor_diffusers
from utils.style_attn_control import MaskPromptedStyleAttentionControl | 8,011 |
def load_image(image_path, res, device, gray=False):
image = Image.open(image_path).convert('RGB') if not gray else Image.open(image_path).convert('L')
image = torch.tensor(np.array(image)).float()
if gray:
image = image.unsqueeze(-1).repeat(1,1,3)
image = image.permute(2, 0, 1)
image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1]
image = F.interpolate(image, (res, res))
image = image.to(device)
return image
def load_mask(image_path, res, device):
if image_path != '':
image = Image.open(image_path).convert('RGB')
image = torch.tensor(np.array(image)).float()
image = image.permute(2, 0, 1)
image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1]
image = F.interpolate(image, (res, res))
image = image.to(device)
image = image[:, :1, :, :]
else:
return None
return image
def main():
args = argparse.ArgumentParser()
args.add_argument("--step", type=int, default=0)
args.add_argument("--layer", type=int, default=10)
args.add_argument("--res", type=int, default=512)
args.add_argument("--style_guidance", type=float, default=1.5)
args.add_argument("--content", type=str, default=None)
args.add_argument("--style", type=str, default=None)
args.add_argument("--content_mask", type=str, default='')
args.add_argument("--style_mask", type=str, default='')
args.add_argument("--output", type=str, default='./results/')
args.add_argument("--only_mask_region", action="store_true")
args.add_argument("--model_path", type=str, default='runwayml/stable-diffusion-v1-5')
args.add_argument("--SAC_step", type=int, default=35)
args.add_argument("--num_inference_steps", type=int, default=50)
args.add_argument("--LCM_lora", action="store_true")
args = args.parse_args()
STEP = args.step
LAYPER = args.layer
only_mask_region = args.only_mask_region
out_dir = args.output
style_guidance = args.style_guidance
num_inference_steps = args.num_inference_steps
SAC_step = args.SAC_step
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
Guidance_scale = 0.0
model_path = args.model_path
model = MasaCtrlPipeline.from_pretrained(model_path).to(device)
if args.LCM_lora:
model.scheduler = LCMScheduler.from_config(model.scheduler.config)
# load LCM-LoRA
model.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
else:
model.scheduler = DDIMScheduler.from_config(model.scheduler.config)
source_image = load_image(args.content, args.res, device)
style_image = load_image(args.style, args.res, device)
style_mask = load_mask(args.style_mask, res=64, device=device)
source_mask = load_mask(args.content_mask, res=args.res, device=device)
with torch.no_grad():
style_content = torch.cat([style_image, source_image], dim=0)
source_prompt = ['head', 'head']
prompts = source_prompt + ['head']
editor = AttentionBase()
|
def load_image(image_path, res, device, gray=False):
image = Image.open(image_path).convert('RGB') if not gray else Image.open(image_path).convert('L')
image = torch.tensor(np.array(image)).float()
if gray:
image = image.unsqueeze(-1).repeat(1,1,3)
image = image.permute(2, 0, 1)
image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1]
image = F.interpolate(image, (res, res))
image = image.to(device)
return image
def load_mask(image_path, res, device):
if image_path != '':
image = Image.open(image_path).convert('RGB')
image = torch.tensor(np.array(image)).float()
image = image.permute(2, 0, 1)
image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1]
image = F.interpolate(image, (res, res))
image = image.to(device)
image = image[:, :1, :, :]
else:
return None
return image
def main():
args = argparse.ArgumentParser()
args.add_argument("--step", type=int, default=0)
args.add_argument("--layer", type=int, default=10)
args.add_argument("--res", type=int, default=512)
args.add_argument("--style_guidance", type=float, default=1.5)
args.add_argument("--content", type=str, default=None)
args.add_argument("--style", type=str, default=None)
args.add_argument("--content_mask", type=str, default='')
args.add_argument("--style_mask", type=str, default='')
args.add_argument("--output", type=str, default='./results/')
args.add_argument("--only_mask_region", action="store_true")
args.add_argument("--model_path", type=str, default='runwayml/stable-diffusion-v1-5')
args.add_argument("--SAC_step", type=int, default=35)
args.add_argument("--num_inference_steps", type=int, default=50)
args.add_argument("--LCM_lora", action="store_true")
args = args.parse_args()
STEP = args.step
LAYPER = args.layer
only_mask_region = args.only_mask_region
out_dir = args.output
style_guidance = args.style_guidance
num_inference_steps = args.num_inference_steps
SAC_step = args.SAC_step
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
Guidance_scale = 0.0
model_path = args.model_path
model = MasaCtrlPipeline.from_pretrained(model_path).to(device)
if args.LCM_lora:
model.scheduler = LCMScheduler.from_config(model.scheduler.config)
# load LCM-LoRA
model.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
else:
model.scheduler = DDIMScheduler.from_config(model.scheduler.config)
source_image = load_image(args.content, args.res, device)
style_image = load_image(args.style, args.res, device)
style_mask = load_mask(args.style_mask, res=64, device=device)
source_mask = load_mask(args.content_mask, res=args.res, device=device)
with torch.no_grad():
style_content = torch.cat([style_image, source_image], dim=0)
source_prompt = ['head', 'head']
prompts = source_prompt + ['head']
editor = AttentionBase() | regiter_attention_editor_diffusers(model, editor) | 2 | 2023-12-06 01:18:39+00:00 | 12k |
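This record also stops midway through main(): the images, masks and a plain AttentionBase editor are ready, and the record's next_line field shows that the script goes on to register an attention editor on the pipeline. The continuation below is only a plausible sketch assembled from the signatures quoted in this record's own context snippets (MasaCtrlPipeline.invert and __call__, MaskPromptedStyleAttentionControl, regiter_attention_editor_diffusers); it reuses the locals defined in main() above, and names such as style_code, content_code and the output filename are assumptions rather than the original code.

# Invert both images into DDIM latents (MasaCtrlPipeline.invert returns latents and start latents).
style_code, _ = model.invert(style_image, 'head',
                             guidance_scale=Guidance_scale,
                             num_inference_steps=num_inference_steps)
content_code, _ = model.invert(source_image, 'head',
                               guidance_scale=Guidance_scale,
                               num_inference_steps=num_inference_steps)

# Replace the plain AttentionBase with the mask-prompted style attention controller.
editor = MaskPromptedStyleAttentionControl(STEP, LAYPER,
                                           style_attn_step=SAC_step,
                                           total_steps=num_inference_steps,
                                           style_guidance=style_guidance,
                                           only_masked_region=only_mask_region,
                                           style_mask=style_mask,
                                           source_mask=source_mask)
regiter_attention_editor_diffusers(model, editor)

# Sample a three-image batch (style, content, stylized target) and save the result.
latents = torch.cat([style_code, content_code, content_code], dim=0)
images = model(prompts, latents=latents,
               guidance_scale=Guidance_scale,
               num_inference_steps=num_inference_steps)
os.makedirs(out_dir, exist_ok=True)
save_image(images, os.path.join(out_dir, 'stylized.png'))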
MarilynKeller/aitviewer-skel | aitviewer/renderables/spheres.py | [
{
"identifier": "Material",
"path": "aitviewer/scene/material.py",
"snippet": "class Material(object):\n \"\"\"Per object material properties.\"\"\"\n\n def __init__(\n self,\n diffuse=0.5,\n ambient=0.5,\n specular=0.5,\n color=(0.5, 0.5, 0.5, 1.0),\n ):\n \"\"\"\n :param diffuse: diffuse coefficient in Phong shading model\n :param ambient: ambient coefficient in Phong shading model\n :param specular: specular coefficient in Phong shading model\n :param color: (R,G,B,A) 0-1 formatted color value\n \"\"\"\n assert len(color) == 4\n\n self.diffuse = diffuse\n self.ambient = ambient\n self.specular = specular\n self._color = color\n\n @property\n def color(self):\n return self._color\n\n @color.setter\n def color(self, color):\n self._color = color"
},
{
"identifier": "Node",
"path": "aitviewer/scene/node.py",
"snippet": "class Node(object):\n \"\"\"Interface for nodes.\"\"\"\n\n def __init__(\n self,\n name=None,\n icon=None,\n position=None,\n rotation=None,\n scale=1.0,\n color=(0.5, 0.5, 0.5, 1.0),\n material=None,\n is_selectable=True,\n gui_affine=True,\n gui_material=True,\n enabled_frames=None,\n n_frames=1,\n ):\n \"\"\"\n :param name: Name of the node\n :param icon: Custom Node Icon using custom Icon font\n :param position: Starting position in the format (X,Y,Z) or np array of positions with shape (F, 3)\n :param rotation: Starting rotation in rotation matrix representation (3,3) or np array of rotations with shape (F, 3, 3)\n :param scale: Starting scale (scalar) or np array of scale values with shape (F)\n :param color: (R,G,B,A) 0-1 formatted color value.\n :param material: Object material properties. The color specified in the material will override node color\n :param is_selectable: If True the node is selectable when clicked on, otherwise the parent node will be selected.\n :param gui_affine: If True the node will have transform controls (position, rotation, scale) in the GUI.\n :param gui_material: If True the node will have material controls in the GUI.\n :param enabled_frames: Numpy array of boolean values, the object will be enabled only in frames where the value is True,\n the number of ones in the mask must match the number of frames of the object.\n :param n_frames: How many frames this renderable has.\n \"\"\"\n # Transform & Animation\n position = np.zeros(3, dtype=np.float32) if position is None else np.array(position, dtype=np.float32)\n rotation = np.eye(3, dtype=np.float32) if rotation is None else np.array(rotation, dtype=np.float32)\n\n self._positions = position if len(position.shape) != 1 else position[np.newaxis]\n self._rotations = rotation if len(rotation.shape) != 2 else rotation[np.newaxis]\n self._scales = (scale if isinstance(scale, np.ndarray) else np.array([scale])).astype(np.float32)\n\n n_positions = self._positions.shape[0]\n n_rotations = self._rotations.shape[0]\n n_scales = self._scales.shape[0]\n\n if n_frames > 1:\n assert n_positions == 1 or n_frames == n_positions, (\n f\"Number of position frames\" f\" ({n_positions}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_rotations == 1 or n_frames == n_rotations, (\n f\"Number of rotations frames\" f\" ({n_rotations}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_scales == 1 or n_frames == n_scales, (\n f\"Number of scales frames\" f\" ({n_scales}) must be 1 or match number of Node frames {n_frames}\"\n )\n else:\n n_frames = max(n_positions, n_rotations, n_scales)\n assert (\n (n_positions == 1 or n_positions == n_frames)\n and (n_rotations == 1 or n_rotations == n_frames)\n and (n_scales == 1 or n_scales == n_frames)\n ), (\n f\"Number of position\"\n f\"({n_positions}), rotation ({n_rotations}) and scale ({n_scales})\"\n \"frames must be 1 or match.\"\n )\n\n # Frames\n self._n_frames = n_frames\n self._current_frame_id = 0\n self.model_matrix = self.get_local_transform()\n self._enabled_frames = enabled_frames\n if self._enabled_frames is not None:\n assert np.count_nonzero(self._enabled_frames) == n_frames, (\n f\"Number of non-zero elements in enabled_frames\"\n f\" ({np.count_nonzero(self._enabled_frames)}) must match number of frames in sequence ({n_frames})\"\n )\n # Create an array that maps from the true frame id (counting also disabled frames) to the index of the\n # first existing frame in the sequence.\n self._enabled_frame_id = 
np.cumsum(self._enabled_frames) - 1\n\n # Stores the true frame id (counting also disabled frames) we use this to allow going\n # through both enabled and disabled frames from the GUI.\n self._internal_frame_id = 0\n\n # Material\n self.material = Material(color=color) if material is None else material\n\n # Renderable Attributes\n self.is_renderable = False\n self.backface_culling = True\n self.backface_fragmap = False\n self.draw_outline = False\n\n # Flags to enable rendering passes\n self.cast_shadow = False\n self.depth_prepass = False\n self.fragmap = False\n self.outline = False\n\n # Programs for render passes. Subclasses are responsible for setting these.\n self.depth_only_program = None # Required for depth_prepass and cast_shadow passes\n self.fragmap_program = None # Required for fragmap pass\n self.outline_program = None # Required for outline pass\n\n # GUI\n self.name = name if name is not None else type(self).__name__\n self.uid = C.next_gui_id()\n self.unique_name = self.name + \"{}\".format(self.uid)\n self.icon = icon if icon is not None else \"\\u0082\"\n self._enabled = True\n self._expanded = False\n self.gui_controls = {\n \"affine\": {\n \"fn\": self.gui_affine,\n \"icon\": \"\\u009b\",\n \"is_visible\": gui_affine,\n },\n \"material\": {\n \"fn\": self.gui_material,\n \"icon\": \"\\u0088\",\n \"is_visible\": gui_material,\n },\n \"animation\": {\n \"fn\": self.gui_animation,\n \"icon\": \"\\u0098\",\n \"is_visible\": (lambda: self._n_frames > 1)(),\n },\n \"io\": {\n \"fn\": self.gui_io,\n \"icon\": \"\\u009a\",\n \"is_visible\": (lambda: self.gui_io.__func__ is not Node.gui_io)(),\n },\n }\n self.gui_modes = {\"view\": {\"title\": \" View\", \"fn\": self.gui_mode_view, \"icon\": \"\\u0099\"}}\n self._selected_mode = \"view\"\n self._show_in_hierarchy = True\n self.is_selectable = is_selectable\n self.export_usd_enabled = True\n self.export_usd_expanded = True\n\n self.nodes: List[Node] = []\n self.parent: Node = None\n\n # Selected Mode\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n self._selected_mode = selected_mode\n\n # Transform\n @property\n def position(self):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n return self._positions[idx]\n\n @position.setter\n def position(self, position):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n self._positions[idx] = np.array(position, dtype=np.float32).copy()\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, positions):\n self._positions = positions\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotation(self):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n return self._rotations[idx]\n\n @rotation.setter\n def rotation(self, rotation):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n self._rotations[idx] = rotation\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotations(self):\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations):\n self._rotations = rotations\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scale(self):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n return 
self._scales[idx]\n\n @scale.setter\n def scale(self, scale):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n self._scales[idx] = scale\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scales(self):\n return self._scales\n\n @scales.setter\n def scales(self, scales):\n self._scales = scales\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @staticmethod\n @lru_cache()\n def _compute_transform(pos, rot, scale):\n rotation = np.eye(4)\n rotation[:3, :3] = np.array(rot)\n\n trans = np.eye(4)\n trans[:3, 3] = np.array(pos)\n\n scale = np.diag([scale, scale, scale, 1])\n\n return (trans @ rotation @ scale).astype(\"f4\")\n\n def get_local_transform(self):\n \"\"\"Construct local transform as a 4x4 matrix from this node's position, orientation and scale.\"\"\"\n return self._compute_transform(tuple(self.position), tuple(map(tuple, self.rotation)), self.scale)\n\n def update_transform(self, parent_transform=None):\n \"\"\"Update the model matrix of this node and all of its descendants.\"\"\"\n if parent_transform is None:\n self.model_matrix = self.get_local_transform()\n else:\n self.model_matrix = parent_transform.astype(\"f4\") @ self.get_local_transform()\n\n for n in self.nodes:\n n.update_transform(self.model_matrix)\n\n @property\n def color(self):\n return self.material.color\n\n @color.setter\n def color(self, color):\n self.material.color = color\n\n @property\n def bounds(self):\n \"\"\"The bounds in the format ((x_min, x_max), (y_min, y_max), (z_min, z_max))\"\"\"\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_bounds(self):\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_center(self):\n return self.current_bounds.mean(-1)\n\n @property\n def center(self):\n return self.bounds.mean(-1)\n\n def get_local_bounds(self, points):\n if len(points.shape) == 2 and points.shape[-1] == 3:\n points = points[np.newaxis]\n assert len(points.shape) == 3\n\n # Compute min and max coordinates of the bounding box ignoring NaNs.\n val = np.array(\n [\n [np.nanmin(points[:, :, 0]), np.nanmax(points[:, :, 0])],\n [np.nanmin(points[:, :, 1]), np.nanmax(points[:, :, 1])],\n [np.nanmin(points[:, :, 2]), np.nanmax(points[:, :, 2])],\n ]\n )\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n def get_bounds(self, points):\n val = self.get_local_bounds(points)\n\n # Transform bounding box with the model matrix.\n val = (self.model_matrix @ np.vstack((val, np.array([1.0, 1.0]))))[:3]\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n @property\n def n_frames(self):\n return self._n_frames\n\n @n_frames.setter\n def n_frames(self, n_frames):\n self._n_frames = n_frames\n\n def __len__(self):\n return self.n_frames\n\n @property\n def current_frame_id(self):\n return self._current_frame_id\n\n @current_frame_id.setter\n def current_frame_id(self, frame_id):\n # Check if the frame changed.\n last_frame_id = self._current_frame_id if self._enabled_frames is None else self._internal_frame_id\n if self.n_frames == 1 or frame_id == last_frame_id:\n return\n\n self.on_before_frame_update()\n if self._enabled_frames is None:\n if frame_id < 0:\n self._current_frame_id = 0\n elif frame_id >= len(self):\n self._current_frame_id = len(self) - 1\n 
else:\n self._current_frame_id = frame_id\n else:\n # If an enabled_frames is present use it to get the current frame.\n if frame_id < 0:\n self._internal_frame_id = 0\n elif frame_id >= self._enabled_frames.shape[0]:\n self._internal_frame_id = self._enabled_frames.shape[0] - 1\n else:\n self._internal_frame_id = frame_id\n self._current_frame_id = self._enabled_frame_id[self._internal_frame_id]\n # Update enabled using the mask.\n self.enabled = self._enabled_frames[self._internal_frame_id]\n\n # Update frame id of all children nodes.\n for n in self.nodes:\n n.current_frame_id = self._current_frame_id\n\n self.on_frame_update()\n if self.parent and (self._positions.shape[0] > 1 or self._rotations.shape[0] > 1 or self._scales.shape[0] > 1):\n self.update_transform(self.parent.model_matrix)\n\n def next_frame(self):\n self.current_frame_id = self.current_frame_id + 1 if self.current_frame_id < len(self) - 1 else 0\n\n def previous_frame(self):\n self.current_frame_id = self.current_frame_id - 1 if self.current_frame_id > 0 else len(self) - 1\n\n def on_before_frame_update(self):\n \"\"\"Called when the current frame is about to change, 'self.current_frame_id' still has the id of the\n previous frame.\"\"\"\n pass\n\n def on_frame_update(self):\n \"\"\"Called when the current frame is changed.\"\"\"\n pass\n\n def add(self, *nodes, **kwargs):\n self._add_nodes(*nodes, **kwargs)\n\n def _add_node(self, n: \"Node\", show_in_hierarchy=True, expanded=False, enabled=True):\n \"\"\"\n Add a single node\n :param show_in_hierarchy: Whether to show the node in the scene hierarchy.\n :param expanded: Whether the node is initially expanded in the GUI.\n \"\"\"\n if n is None:\n return\n n._show_in_hierarchy = show_in_hierarchy\n n._expanded = expanded\n n._enabled = enabled if n._enabled_frames is None else n._enabled_frames[n.current_frame_id]\n self.nodes.append(n)\n n.parent = self\n n.update_transform(self.model_matrix)\n\n def _add_nodes(self, *nodes, **kwargs):\n \"\"\"Add multiple nodes\"\"\"\n for n in nodes:\n self._add_node(n, **kwargs)\n\n def remove(self, *nodes):\n for n in nodes:\n n.release()\n try:\n self.nodes.remove(n)\n except:\n pass\n\n @property\n def show_in_hierarchy(self):\n return self._show_in_hierarchy\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n self._enabled = enabled\n\n @property\n def expanded(self):\n return self._expanded\n\n @expanded.setter\n def expanded(self, expanded):\n self._expanded = expanded\n\n def is_transparent(self):\n \"\"\"\n Returns true if the object is transparent and should thus be sorted when rendering.\n Subclassess that use a different color should implement this method to be rendered correctly when transparent.\n \"\"\"\n return self.material.color[3] < 1.0\n\n def gui(self, imgui):\n \"\"\"\n Render GUI for custom node properties and controls. 
Implementation optional.\n Elements rendered here will show up in the scene hierarchy\n :param imgui: imgui context.\n See https://pyimgui.readthedocs.io/en/latest/reference/imgui.core.html for available elements to render\n \"\"\"\n pass\n\n def gui_modes(self, imgui):\n \"\"\"Render GUI with toolbar (tools) for this particular node\"\"\"\n\n def gui_animation(self, imgui):\n \"\"\"Render GUI for animation related settings\"\"\"\n\n if self._enabled_frames is None:\n if self.n_frames > 1:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self.current_frame_id,\n min_value=0,\n max_value=self.n_frames - 1,\n )\n if u:\n self.current_frame_id = fid\n else:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self._internal_frame_id,\n min_value=0,\n max_value=self._enabled_frames.shape[0] - 1,\n )\n if u:\n self.current_frame_id = fid\n\n def gui_affine(self, imgui):\n \"\"\"Render GUI for affine transformations\"\"\"\n # Position controls\n up, pos = imgui.drag_float3(\n \"Position##pos{}\".format(self.unique_name),\n *self.position,\n 1e-2,\n format=\"%.2f\",\n )\n if up:\n self.position = pos\n\n # Rotation controls\n euler_angles = rot2euler_numpy(self.rotation[np.newaxis], degrees=True)[0]\n ur, euler_angles = imgui.drag_float3(\n \"Rotation##pos{}\".format(self.unique_name),\n *euler_angles,\n 1e-2,\n format=\"%.2f\",\n )\n if ur:\n self.rotation = euler2rot_numpy(np.array(euler_angles)[np.newaxis], degrees=True)[0]\n\n # Scale controls\n us, scale = imgui.drag_float(\n \"Scale##scale{}\".format(self.unique_name),\n self.scale,\n 1e-2,\n min_value=0.001,\n max_value=100.0,\n format=\"%.3f\",\n )\n if us:\n self.scale = scale\n\n def gui_material(self, imgui):\n \"\"\"Render GUI with material properties\"\"\"\n\n # Color Control\n uc, color = imgui.color_edit4(\"Color##color{}'\".format(self.unique_name), *self.material.color)\n if uc:\n self.color = color\n\n # Diffuse\n ud, diffuse = imgui.slider_float(\n \"Diffuse##diffuse{}\".format(self.unique_name),\n self.material.diffuse,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ud:\n self.material.diffuse = diffuse\n\n # Ambient\n ua, ambient = imgui.slider_float(\n \"Ambient##ambient{}\".format(self.unique_name),\n self.material.ambient,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ua:\n self.material.ambient = ambient\n\n def gui_io(self, imgui):\n \"\"\"Render GUI for import/export\"\"\"\n pass\n\n def gui_mode_view(self, imgui):\n \"\"\"Render custom GUI for view mode\"\"\"\n pass\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.enabled = imgui.checkbox(\"Enabled\", self.enabled)\n if any([n._show_in_hierarchy for n in self.nodes]):\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n for n in self.nodes:\n if not n._show_in_hierarchy:\n continue\n if imgui.begin_menu(f\"{n.name}##{n.uid}\"):\n n.gui_context_menu(imgui, x, y)\n imgui.end_menu()\n\n # Renderable\n @staticmethod\n def once(func):\n def _decorator(self, *args, **kwargs):\n if self.is_renderable:\n return\n else:\n func(self, *args, **kwargs)\n self.is_renderable = True\n\n return _decorator\n\n def make_renderable(self, ctx):\n \"\"\"\n Prepares this object for rendering. 
This function must be called before `render` is used.\n :param ctx: The moderngl context.\n \"\"\"\n pass\n\n def render(self, camera, position=None, rotation=None, **kwargs):\n \"\"\"Render the current frame in this sequence.\"\"\"\n pass\n\n def render_positions(self, prog):\n \"\"\"\n Render with a VAO with only positions bound, used for shadow mapping, fragmap and depth prepass.\n \"\"\"\n pass\n\n def redraw(self, **kwargs):\n \"\"\"Perform update and redraw operations. Push to the GPU when finished. Recursively redraw child nodes\"\"\"\n for n in self.nodes:\n n.redraw(**kwargs)\n\n def set_camera_matrices(self, prog, camera, **kwargs):\n \"\"\"Set the model view projection matrix in the given program.\"\"\"\n # Transpose because np is row-major but OpenGL expects column-major.\n prog[\"model_matrix\"].write(self.model_matrix.T.astype(\"f4\").tobytes())\n prog[\"view_projection_matrix\"].write(camera.get_view_projection_matrix().T.astype(\"f4\").tobytes())\n\n def receive_shadow(self, program, **kwargs):\n \"\"\"\n Call this function if the renderable is to receive shadows.\n :param program: The shader program that can shade with shadows.\n :param kwargs: The render kwargs.\n \"\"\"\n if kwargs.get(\"shadows_enabled\", False):\n lights = kwargs[\"lights\"]\n\n for i, light in enumerate(lights):\n if light.shadow_enabled and light.shadow_map:\n light_matrix = light.mvp() @ self.model_matrix\n program[f\"dirLights[{i}].matrix\"].write(light_matrix.T.tobytes())\n\n # Bind shadowmap to slot i + 1, we reserve slot 0 for the mesh texture\n # and use slots 1 to (#lights + 1) for shadow maps\n light.shadow_map.use(location=i + 1)\n\n # Set sampler uniforms\n uniform = program[f\"shadow_maps\"]\n uniform.value = 1 if uniform.array_length == 1 else [*range(1, len(lights) + 1)]\n\n def render_shadowmap(self, light_matrix):\n if not self.cast_shadow or self.depth_only_program is None or self.color[3] == 0.0:\n return\n\n prog = self.depth_only_program\n prog[\"model_matrix\"].write(self.model_matrix.T.tobytes())\n prog[\"view_projection_matrix\"].write(light_matrix.T.tobytes())\n\n self.render_positions(prog)\n\n def render_fragmap(self, ctx, camera, uid=None):\n if not self.fragmap or self.fragmap_program is None:\n return\n\n # Transpose because np is row-major but OpenGL expects column-major.\n prog = self.fragmap_program\n self.set_camera_matrices(prog, camera)\n\n # Render with the specified object uid, if None use the node uid instead.\n prog[\"obj_id\"] = uid or self.uid\n\n if self.backface_culling or self.backface_fragmap:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n\n # If backface_fragmap is enabled for this node only render backfaces\n if self.backface_fragmap:\n ctx.cull_face = \"front\"\n\n self.render_positions(prog)\n\n # Restore cull face to back\n if self.backface_fragmap:\n ctx.cull_face = \"back\"\n\n def render_depth_prepass(self, camera, **kwargs):\n if not self.depth_prepass or self.depth_only_program is None:\n return\n\n prog = self.depth_only_program\n self.set_camera_matrices(prog, camera)\n self.render_positions(prog)\n\n def render_outline(self, ctx, camera):\n if self.outline and self.outline_program is not None:\n prog = self.outline_program\n self.set_camera_matrices(prog, camera)\n\n if self.backface_culling:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n self.render_positions(prog)\n\n # Render children node recursively.\n for n in self.nodes:\n n.render_outline(ctx, camera)\n\n def release(self):\n 
\"\"\"\n Release all OpenGL resources used by this node and any of its children. Subclasses that instantiate OpenGL\n objects should implement this method with '@hooked' to avoid leaking resources.\n \"\"\"\n for n in self.nodes:\n n.release()\n\n def on_selection(self, node, instance_id, tri_id):\n \"\"\"\n Called when the node is selected\n\n :param node: the node which was clicked (can be None if the selection wasn't a mouse event)\n :param instance_id: the id of the instance that was clicked, 0 if the object is not instanced\n (can be None if the selection wasn't a mouse event)\n :param tri_id: the id of the triangle that was clicked from the 'node' mesh\n (can be None if the selection wasn't a mouse event)\n \"\"\"\n pass\n\n def key_event(self, key, wnd_keys):\n \"\"\"\n Handle shortcut key presses (if you are the selected object)\n \"\"\"\n pass\n\n def update_frames(self, *args, **kwargs):\n pass\n\n def add_frames(self, *args, **kwargs):\n pass\n\n def remove_frames(self, *args, **kwargs):\n pass\n\n def _export_usd_recursively(self, stage, usd_path, directory, verbose):\n if verbose:\n print(usd_path)\n for n in self.nodes:\n if n.export_usd_enabled:\n n.export_usd(stage, usd_path, directory, verbose)\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n \"\"\"\n Export the node into an USD file. Nodes that implement this method should use\n recursively call this for every children that should also be exported.\n\n :param stage: an object of type Usd.Stage into which to export the node\n :param usd_path: the path of the parent object in the USD file scene hierarchy.\n \"\"\"\n from pxr import Gf, UsdGeom\n\n usd_path = f\"{usd_path}/{self.name.replace(' ', '_')}_{self.uid:03}\"\n\n # Transform.\n xform = UsdGeom.Xform.Define(stage, usd_path)\n a_xform = xform.AddTransformOp()\n a_xform.Set(Gf.Matrix4d(self.get_local_transform().astype(np.float64).T))\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)"
},
{
"identifier": "get_depth_only_program",
"path": "aitviewer/shaders.py",
"snippet": "@functools.lru_cache()\ndef get_depth_only_program(vs_path, instanced=0):\n defines = {\"INSTANCED\": instanced}\n return resources.programs.load(\n ProgramDescription(\n vertex_shader=vs_path,\n fragment_shader=\"shadow_mapping/depth_only.fs.glsl\",\n defines=defines,\n )\n )"
},
{
"identifier": "get_fragmap_program",
"path": "aitviewer/shaders.py",
"snippet": "@functools.lru_cache()\ndef get_fragmap_program(vs_path, instanced=0):\n defines = {\"INSTANCED\": instanced}\n return resources.programs.load(\n ProgramDescription(\n vertex_shader=vs_path,\n fragment_shader=\"fragment_picking/frag_map.fs.glsl\",\n defines=defines,\n )\n )"
},
{
"identifier": "get_outline_program",
"path": "aitviewer/shaders.py",
"snippet": "@functools.lru_cache()\ndef get_outline_program(vs_path, instanced=0):\n defines = {\"INSTANCED\": instanced}\n return resources.programs.load(\n ProgramDescription(\n vertex_shader=vs_path,\n fragment_shader=\"outline/outline_prepare.fs.glsl\",\n defines=defines,\n )\n )"
},
{
"identifier": "get_sphere_instanced_program",
"path": "aitviewer/shaders.py",
"snippet": "def get_sphere_instanced_program():\n return get_smooth_lit_with_edges_program(\"sphere_instanced.vs.glsl\")"
},
{
"identifier": "usd",
"path": "aitviewer/utils/usd.py",
"snippet": "def _get_texture_paths(path, name, directory):\ndef copy_texture(path, name, directory):\ndef save_image_as_texture(img, img_name, name, directory):\ndef add_color(stage, mesh, usd_path, color):\ndef add_texture(stage, mesh, usd_path, texture_path):\ndef add_mesh(stage, usd_path, name, vertices, faces, transform):"
},
{
"identifier": "hooked",
"path": "aitviewer/utils/decorators.py",
"snippet": "class hooked:\n def __init__(self, fn):\n self.fn = fn\n\n def __set_name__(self, owner, name):\n func = self.fn\n\n def _decorator(self, *args, **kwargs):\n super_obj = super(owner, self)\n super_fn = getattr(super_obj, func.__name__)\n super_fn(*args, **kwargs)\n return func(self, *args, **kwargs)\n\n setattr(owner, name, _decorator)\n\n def __call__(self):\n assert (\n False\n ), \"@hooked decorator object should never be called directly. This can happen if you apply this decorator to a function that is not a method.\""
},
{
"identifier": "set_lights_in_program",
"path": "aitviewer/utils/utils.py",
"snippet": "def set_lights_in_program(prog, lights, shadows_enabled, ambient_strength):\r\n \"\"\"Set program lighting from scene lights\"\"\"\r\n for i, light in enumerate(lights):\r\n prog[f\"dirLights[{i}].direction\"].value = tuple(light.direction)\r\n prog[f\"dirLights[{i}].color\"].value = light.light_color\r\n prog[f\"dirLights[{i}].strength\"].value = light.strength if light.enabled else 0.0\r\n prog[f\"dirLights[{i}].shadow_enabled\"].value = shadows_enabled and light.shadow_enabled\r\n prog[\"ambient_strength\"] = ambient_strength\r"
},
{
"identifier": "set_material_properties",
"path": "aitviewer/utils/utils.py",
"snippet": "def set_material_properties(prog, material):\r\n prog[\"diffuse_coeff\"].value = material.diffuse\r\n prog[\"ambient_coeff\"].value = material.ambient\r"
}
] | import moderngl
import numpy as np
from moderngl_window.opengl.vao import VAO
from aitviewer.scene.material import Material
from aitviewer.scene.node import Node
from aitviewer.shaders import (
get_depth_only_program,
get_fragmap_program,
get_outline_program,
get_sphere_instanced_program,
)
from aitviewer.utils import usd
from aitviewer.utils.decorators import hooked
from aitviewer.utils.utils import set_lights_in_program, set_material_properties | 9,397 | :param radius: Radius of the spheres.
:param color: Color of the spheres.
:param rings: Longitudinal resolution.
:param sectors: Latitudinal resolution.
"""
if len(positions.shape) == 2:
positions = positions[np.newaxis]
assert len(positions.shape) == 3
# Define a default material in case there is None.
if isinstance(color, tuple) or len(color.shape) == 1:
kwargs["material"] = kwargs.get("material", Material(color=color, ambient=0.2))
self.sphere_colors = kwargs["material"].color
else:
assert color.shape[1] == 4 and positions.shape[1] == color.shape[0]
self.sphere_colors = color
if "n_frames" not in kwargs:
kwargs["n_frames"] = positions.shape[0]
super().__init__(icon=icon, **kwargs)
self._sphere_positions = positions
self.radius = radius
self.vertices, self.faces = _create_sphere(radius=1.0, rings=rings, sectors=sectors)
self.n_vertices = self.vertices.shape[0]
self.n_spheres = self.sphere_positions.shape[1]
self.draw_edges = False
self._need_upload = True
# Render passes.
self.outline = True
self.fragmap = True
self.depth_prepass = True
self.cast_shadow = cast_shadow
@property
def bounds(self):
bounds = self.get_bounds(self.sphere_positions)
bounds[:, 0] -= self.radius
bounds[:, 1] += self.radius
return bounds
@property
def current_bounds(self):
bounds = self.get_bounds(self.current_sphere_positions)
bounds[:, 0] -= self.radius
bounds[:, 1] += self.radius
return bounds
@property
def vertex_colors(self):
if len(self._sphere_colors.shape) == 1:
return np.full((self.n_spheres * self.n_vertices, 4), self._sphere_colors)
else:
return np.tile(self._sphere_colors, (self.n_vertices, 1))
def color_one(self, index, color):
new_colors = np.tile(np.array(self.material.color), (self.n_spheres, 1))
new_colors[index] = color
self.sphere_colors = new_colors
@Node.color.setter
def color(self, color):
self.material.color = color
self.sphere_colors = color
self.redraw()
@property
def sphere_colors(self):
if len(self._sphere_colors.shape) == 1:
t = np.tile(np.array(self._sphere_colors), (self.n_spheres, 1))
return t
else:
return self._sphere_colors
@sphere_colors.setter
def sphere_colors(self, color):
if isinstance(color, tuple):
color = np.array(color)
self._sphere_colors = color
self.redraw()
@property
def current_sphere_positions(self):
idx = self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0
return self.sphere_positions[idx]
@current_sphere_positions.setter
def current_sphere_positions(self, positions):
assert len(positions.shape) == 2
idx = self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0
self.sphere_positions[idx] = positions
self.redraw()
@property
def sphere_positions(self):
return self._sphere_positions
@sphere_positions.setter
def sphere_positions(self, pos):
if len(pos.shape) == 2:
pos = pos[np.newaxis]
self._sphere_positions = pos
self.n_frames = len(self._sphere_positions)
self.redraw()
def on_frame_update(self):
self.redraw()
def redraw(self, **kwargs):
self._need_upload = True
@Node.once
def make_renderable(self, ctx: moderngl.Context):
self.prog = get_sphere_instanced_program()
vs_path = "sphere_instanced_positions.vs.glsl"
self.outline_program = get_outline_program(vs_path)
| # Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
def _create_sphere(radius=1.0, rings=16, sectors=32):
"""
Create a sphere centered at the origin. This is a port of moderngl-window's geometry.sphere() function, but it
returns the vertices and faces explicitly instead of directly storing them in a VAO.
:param radius: Radius of the sphere.
:param rings: Longitudinal resolution.
:param sectors: Latitudinal resolution.
:return: vertices and faces of the sphere.
"""
R = 1.0 / (rings - 1)
S = 1.0 / (sectors - 1)
vertices = np.zeros((rings * sectors, 3))
v, n = 0, 0
for r in range(rings):
for s in range(sectors):
y = np.sin(-np.pi / 2 + np.pi * r * R)
x = np.cos(2 * np.pi * s * S) * np.sin(np.pi * r * R)
z = np.sin(2 * np.pi * s * S) * np.sin(np.pi * r * R)
vertices[v] = np.array([x, y, z]) * radius
v += 1
n += 1
faces = np.zeros([(rings - 1) * (sectors - 1) * 2, 3], dtype=np.int32)
i = 0
for r in range(rings - 1):
for s in range(sectors - 1):
faces[i] = np.array([r * sectors + s, (r + 1) * sectors + (s + 1), r * sectors + (s + 1)])
faces[i + 1] = np.array([r * sectors + s, (r + 1) * sectors + s, (r + 1) * sectors + (s + 1)])
i += 2
return vertices, faces
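# Editorial sketch (not part of the original aitviewer source): the helper above
# returns plain numpy arrays with rings * sectors vertices and
# (rings - 1) * (sectors - 1) * 2 triangles, e.g.:
#   verts, faces = _create_sphere(radius=1.0, rings=16, sectors=32)
#   assert verts.shape == (512, 3) and faces.shape == (930, 3)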
class Spheres(Node):
"""Render some simple spheres."""
def __init__(
self,
positions,
radius=0.01,
color=(0.0, 0.0, 1.0, 1.0),
rings=16,
sectors=32,
icon="\u008d",
cast_shadow=False,
**kwargs,
):
"""
Initializer.
:param positions: A numpy array of shape (F, N, 3) or (N, 3) containing N sphere positions for F time steps.
:param radius: Radius of the spheres.
:param color: Color of the spheres.
:param rings: Longitudinal resolution.
:param sectors: Latitudinal resolution.
"""
if len(positions.shape) == 2:
positions = positions[np.newaxis]
assert len(positions.shape) == 3
# Define a default material in case there is None.
if isinstance(color, tuple) or len(color.shape) == 1:
kwargs["material"] = kwargs.get("material", Material(color=color, ambient=0.2))
self.sphere_colors = kwargs["material"].color
else:
assert color.shape[1] == 4 and positions.shape[1] == color.shape[0]
self.sphere_colors = color
if "n_frames" not in kwargs:
kwargs["n_frames"] = positions.shape[0]
super().__init__(icon=icon, **kwargs)
self._sphere_positions = positions
self.radius = radius
self.vertices, self.faces = _create_sphere(radius=1.0, rings=rings, sectors=sectors)
self.n_vertices = self.vertices.shape[0]
self.n_spheres = self.sphere_positions.shape[1]
self.draw_edges = False
self._need_upload = True
# Render passes.
self.outline = True
self.fragmap = True
self.depth_prepass = True
self.cast_shadow = cast_shadow
@property
def bounds(self):
bounds = self.get_bounds(self.sphere_positions)
bounds[:, 0] -= self.radius
bounds[:, 1] += self.radius
return bounds
@property
def current_bounds(self):
bounds = self.get_bounds(self.current_sphere_positions)
bounds[:, 0] -= self.radius
bounds[:, 1] += self.radius
return bounds
@property
def vertex_colors(self):
if len(self._sphere_colors.shape) == 1:
return np.full((self.n_spheres * self.n_vertices, 4), self._sphere_colors)
else:
return np.tile(self._sphere_colors, (self.n_vertices, 1))
def color_one(self, index, color):
new_colors = np.tile(np.array(self.material.color), (self.n_spheres, 1))
new_colors[index] = color
self.sphere_colors = new_colors
@Node.color.setter
def color(self, color):
self.material.color = color
self.sphere_colors = color
self.redraw()
@property
def sphere_colors(self):
if len(self._sphere_colors.shape) == 1:
t = np.tile(np.array(self._sphere_colors), (self.n_spheres, 1))
return t
else:
return self._sphere_colors
@sphere_colors.setter
def sphere_colors(self, color):
if isinstance(color, tuple):
color = np.array(color)
self._sphere_colors = color
self.redraw()
@property
def current_sphere_positions(self):
idx = self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0
return self.sphere_positions[idx]
@current_sphere_positions.setter
def current_sphere_positions(self, positions):
assert len(positions.shape) == 2
idx = self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0
self.sphere_positions[idx] = positions
self.redraw()
@property
def sphere_positions(self):
return self._sphere_positions
@sphere_positions.setter
def sphere_positions(self, pos):
if len(pos.shape) == 2:
pos = pos[np.newaxis]
self._sphere_positions = pos
self.n_frames = len(self._sphere_positions)
self.redraw()
def on_frame_update(self):
self.redraw()
def redraw(self, **kwargs):
self._need_upload = True
@Node.once
def make_renderable(self, ctx: moderngl.Context):
self.prog = get_sphere_instanced_program()
vs_path = "sphere_instanced_positions.vs.glsl"
self.outline_program = get_outline_program(vs_path) | self.depth_only_program = get_depth_only_program(vs_path) | 2 | 2023-12-07 16:13:50+00:00 | 12k |
nexB/dejacode | dje/api.py | [
{
"identifier": "TabPermission",
"path": "dje/api_custom.py",
"snippet": "class TabPermission(permissions.BasePermission):\n \"\"\"\n Allow access only to superusers if the tab_permission are enabled\n for the user Dataspace.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"Return `True` if permission is granted, `False` otherwise.\"\"\"\n if request.user.is_superuser:\n return True\n\n if not request.user.dataspace.tab_permissions_enabled:\n return True"
},
{
"identifier": "copy_object",
"path": "dje/copier.py",
"snippet": "@transaction.atomic()\ndef copy_object(reference_obj, target_dataspace, user, update=False, **kwargs):\n \"\"\"\n Entry point for the copy or update of a given object in another `target`\n Dataspace.\n\n :param reference_obj: Instance of the object to be copied/updated.\n :param target_dataspace: Instance of the target Dataspace.\n :param user: The User to be referenced in the History.\n :param update: Try to update the target object if True\n :return: Instance of the new or updated object.\n \"\"\"\n model_name = reference_obj.__class__.__name__\n debug_message = \"copy_object: {operation} for <{model_name}>: {reference_obj}\"\n obj_in_target = get_object_in(reference_obj, target_dataspace)\n\n try:\n if not obj_in_target: # ADDITION\n # The following could throw an exception if some DB constraint is\n # not fulfilled\n logger.debug(debug_message.format(operation=\"COPY\", **locals()))\n copy_to(reference_obj, target_dataspace, user, **kwargs)\n elif update: # MODIFICATION, only if explicitly asked\n logger.debug(debug_message.format(operation=\"UPDATE\", **locals()))\n update_to(reference_obj, obj_in_target, user, **kwargs)\n else:\n # There's a Match in the target but no update asked, do nothing\n logger.debug(debug_message.format(operation=\"NOOP\", **locals()))\n return\n\n except IntegrityError:\n # Special case where the object cannot be copied nor updated because\n # on of his unique_together with dataspace fields (except\n # the one used for Matching in target) already exists.\n # The integrity error is propagated up from a DB constraints error as\n # the unicity constraint cannot be satisfied in the DB\n logger.debug(debug_message.format(operation=\"IntegrityError\", **locals()))\n raise # re-raise the exception\n\n # Refresh the copied/updated object, Return None if something went wrong\n return get_object_in(reference_obj, target_dataspace)"
},
{
"identifier": "ExtendedNullBooleanSelect",
"path": "dje/fields.py",
"snippet": "class ExtendedNullBooleanSelect(forms.widgets.NullBooleanSelect):\n \"\"\"\n Custom widget to extend the supported values for `BooleanField.null=True`.\n This need to be done at the widget level as a non-supported value from `data`\n will be `None` from the `value_from_datadict` output.\n Everything else will be considered Unknown (`NULL` in the database).\n \"\"\"\n\n def value_from_datadict(self, data, files, name):\n value = data.get(name, None)\n return {\n \"on\": True,\n \"2\": True,\n True: True,\n \"True\": True,\n \"3\": False,\n \"False\": False,\n False: False,\n # Extended values for 'True'\n \"true\": True,\n \"t\": True,\n \"T\": True,\n \"yes\": True,\n \"Yes\": True,\n \"y\": True,\n \"Y\": True,\n # Extended values for 'False'\n \"false\": False,\n \"f\": False,\n \"F\": False,\n \"no\": False,\n \"No\": False,\n \"n\": False,\n \"N\": False,\n }.get(value, None)"
},
{
"identifier": "LastModifiedDateFilter",
"path": "dje/filters.py",
"snippet": "class LastModifiedDateFilter(django_filters.DateTimeFilter):\n help_text = (\n \"Limits to records created or updated since that date. \"\n 'Supports both \"YYYY-MM-DD\" date and \"YYYY-MM-DD HH:MM\" datetime.'\n )\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"help_text\", self.help_text)\n kwargs[\"lookup_expr\"] = \"gte\"\n super().__init__(*args, **kwargs)"
},
{
"identifier": "MultipleUUIDFilter",
"path": "dje/filters.py",
"snippet": "class MultipleUUIDFilter(django_filters.MultipleChoiceFilter):\n \"\"\"Filter on multiple values for an `UUIDField` type using `?field=a&field=b` URL syntax.\"\"\"\n\n help_text = \"Exact UUID. Multi-value supported.\"\n field_class = MultipleUUIDField\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"help_text\", self.help_text)\n super().__init__(*args, **kwargs)"
},
{
"identifier": "Dataspace",
"path": "dje/models.py",
"snippet": "class Dataspace(models.Model):\n \"\"\"\n The Dataspace is a way to keep data for each organization data\n separated and still store them in the same database, schema or table.\n Therefore the Dataspace is part of the primary key of most models\n and it part of a unicity constraint for these models.\n For a given installation there can be several Owner Org defined, but only\n one reference.\n\n This is an important concept used throughout DejaCode to\n separate the reference data provided by nexB from the data used in a given\n installation of DJE.\n\n It is essentially a notion of tenant in a DJE installation and is used to\n segregate org-specific and/or org-private records enabling both\n multi-tenancy as well as nexB-provided reference data and org-specific or\n customized data.\n\n This separation has several purposes such as allowing:\n * orderly and simpler data update from the nexB reference data and inter\n Dataspace data exchange\n * Dataspace specific data customizations (for instance license\n tags configurations or some preferences)\n * multi-tenancy where different organizations can share the same DJE\n instance\n \"\"\"\n\n uuid = models.UUIDField(\n _(\"UUID\"),\n default=uuid.uuid4,\n editable=False,\n unique=True,\n )\n\n name = models.SlugField(\n unique=True,\n max_length=20,\n help_text=_(\n 'Unique name of a Dataspace. The name \"nexB\" is reserved for '\n \"the creators/maintainers of the system software. Dataspace name \"\n \"only allows letters, numbers, underscores and hyphens.\"\n ),\n )\n\n homepage_url = models.URLField(\n _(\"Homepage URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"The homepage URL of the Dataspace owner.\"),\n )\n\n contact_info = models.CharField(\n _(\"Contact information\"),\n max_length=500,\n blank=True,\n help_text=_(\n \"A dedicated email address or URL for contacting the owner of \"\n \"the Dataspace. 
Can be used for Attribution Package generation.\"\n ),\n )\n\n notes = models.TextField(\n blank=True,\n help_text=_(\"Extended Notes about a Dataspace.\"),\n )\n\n show_license_profile_in_license_list_view = models.BooleanField(\n default=False,\n verbose_name=format_lazy(\n \"Show {license_profile} in license list view\",\n license_profile=_(\"license profile\"),\n ),\n help_text=format_lazy(\n \"When true (checked), include the {license_profile} column in the license list view.\",\n license_profile=_(\"license profile\"),\n ),\n )\n\n show_license_type_in_license_list_view = models.BooleanField(\n default=True,\n help_text=_(\n \"When true (checked), include the license type column in the license list view.\",\n ),\n )\n\n show_spdx_short_identifier_in_license_list_view = models.BooleanField(\n verbose_name=_(\"show SPDX short identifier in license list view\"),\n default=False,\n help_text=_(\n \"When true (checked), include the SPDX short identifier in the license list view.\",\n ),\n )\n\n show_usage_policy_in_user_views = models.BooleanField(\n default=True,\n help_text=_(\n \"When true (checked), include the usage policy in user views that \"\n \"show licenses or components.\",\n ),\n )\n\n show_type_in_component_list_view = models.BooleanField(\n default=False,\n help_text=_(\n \"When true (checked), include the type column in the component list view.\",\n ),\n )\n\n hide_empty_fields_in_component_details_view = models.BooleanField(\n default=False,\n help_text=_(\"When true (checked), hide empty fields in the component details view.\"),\n )\n\n set_usage_policy_on_new_component_from_licenses = models.BooleanField(\n _(\"set usage policy on component or package from license policy\"),\n default=False,\n help_text=_(\n \"When true (checked), the application will automatically assign a usage \"\n \"policy to a component or package when its license expression is set or \"\n \"updated when you create, import, edit, or copy that component or package, \"\n \"based on the associated policies that you have defined on the license policy.\"\n ),\n )\n\n logo_url = models.URLField(\n _(\"Logo URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"URL to a Dataspace Logo. If set, it will be included in reports.\"),\n )\n\n full_name = models.CharField(\n max_length=100,\n blank=True,\n help_text=_(\n \"The full name of the Dataspace organization. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n address = models.TextField(\n blank=True,\n help_text=(\n \"The address of the Dataspace organization. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n open_source_information_url = models.URLField(\n _(\"Open Source Information URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A public URL where you publish information about the Dataspace \"\n \"organization's Open Source policies and procedures. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n open_source_download_url = models.URLField(\n _(\"Open Source Download URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A public URL where you provide copies of Open Source software that \"\n \"require Redistribution when you use them in your products. 
Can be \"\n \"used for Attribution Package generation.\"\n ),\n )\n\n home_page_announcements = models.TextField(\n blank=True,\n help_text=_(\n \"Use this field to enter text to appear on the DejaCode home page, \"\n \"normally for the purpose of providing your user community with \"\n \"general-purpose announcements about using DejaCode. \"\n \"Note that you can include URL's in the text if you want to direct \"\n \"users to detailed instructions and announcements.\"\n ),\n )\n\n enable_package_scanning = models.BooleanField(\n default=False,\n help_text=_(\n 'When true (checked), allows a user to click the \"Scan Package\" button when viewing '\n \"a Package, initiating a call to ScanCode.io to scan the Package based on its URL. \"\n \"This setting also activates a DejaCode feature to submit any Package created using \"\n 'the \"Add Package\" button to ScanCode.io for scanning, and it activates the Scans '\n \"choice from the DejaCode Tools dropdown menu.\"\n ),\n )\n\n update_packages_from_scan = models.BooleanField(\n _(\"Update packages automatically from scan\"),\n default=False,\n help_text=_(\n \"When true (checked), enables an automatic DejaCode process to update \"\n \"selected Package fields (such as license expression, primary language, \"\n \"copyright, etc.) when a package scan is completed, depending on the \"\n \"quality of the scan results.\"\n ),\n )\n\n enable_purldb_access = models.BooleanField(\n _(\"Enable PurlDB access\"),\n default=False,\n help_text=_(\n \"When true (checked), enables user access to the PurlDB option from the Tools menu, \"\n \"which presents a list of PurlDB data mined and scanned automatically from multiple \"\n \"public sources. Users can view PurlDB details and can create DejaCode Package \"\n \"definitions using those details, and DejaCode also presents a new PurlDB tab when \"\n \"viewing the details of a Package with matching key values. 
This option also enhances \"\n \"the Global Search feature to extend the search scope beyond the standard DejaCode \"\n \"objects (Packages, Components, Licenses, Owners) and perform an asynchronous query of \"\n \"the PurlDB to find relevant data.\"\n ),\n )\n\n enable_vulnerablecodedb_access = models.BooleanField(\n _(\"Enable VulnerableCodeDB access\"),\n default=False,\n help_text=_(\n \"When true (checked), authorizes DejaCode to access the VulnerableCodeDB \"\n \"using a Package URL (purl) to determine if there are any reported \"\n \"vulnerabilities for a specific Package and return the Vulnerability ID \"\n \"and related URLs to a Vulnerabilities tab in the Package details user \"\n \"view.\"\n ),\n )\n\n objects = DataspaceManager()\n\n class Meta:\n ordering = [\"name\"]\n\n def __str__(self):\n return self.name\n\n def get_admin_url(self):\n opts = self._meta\n viewname = f\"admin:{opts.app_label}_{opts.model_name}_change\"\n return reverse(viewname, args=[self.pk])\n\n def natural_key(self):\n return (self.name,)\n\n @cached_property\n def is_reference(self):\n \"\"\"Return True if this Dataspace is the reference.\"\"\"\n reference = self.__class__._default_manager.get_reference()\n return True if reference and self == reference else False\n\n def get_configuration(self, field_name=None):\n \"\"\"\n Return the associated DataspaceConfiguration.\n If a `field_name` is provided, Return the value for that field from\n the `DataspaceConfiguration`.\n \"\"\"\n try:\n configuration = self.configuration\n except ObjectDoesNotExist:\n return\n\n if field_name:\n return getattr(configuration, field_name, None)\n return configuration\n\n @property\n def has_configuration(self):\n \"\"\"Return True if an associated DataspaceConfiguration instance exists.\"\"\"\n return bool(self.get_configuration())\n\n @property\n def tab_permissions_enabled(self):\n return bool(self.get_configuration(\"tab_permissions\"))"
},
{
"identifier": "ExternalReference",
"path": "dje/models.py",
"snippet": "class ExternalReference(HistoryFieldsMixin, DataspacedModel):\n \"\"\"\n Maps DJE objects to external resources.\n One DJE object may have several ExternalReference when it's referenced on\n multiple sources.\n Also, there is no unicity collision possible as we use the object_id.\n\n The copy for GenericForeignKey field is not supported yet.\n \"\"\"\n\n # The following models should always inherit from ExternalReferenceMixin\n # for the proper deletion in CASCADE behavior.\n CT_LIMIT = (\n models.Q(app_label=\"organization\", model=\"owner\")\n | models.Q(app_label=\"license_library\", model=\"license\")\n | models.Q(app_label=\"component_catalog\", model=\"component\")\n | models.Q(app_label=\"component_catalog\", model=\"package\")\n )\n\n content_type = models.ForeignKey(\n to=ContentType,\n limit_choices_to=CT_LIMIT,\n on_delete=models.PROTECT,\n )\n\n object_id = models.PositiveIntegerField()\n\n content_object = GenericForeignKey(\"content_type\", \"object_id\")\n\n external_source = models.ForeignKey(\n to=\"dje.ExternalSource\",\n on_delete=models.PROTECT,\n )\n\n external_id = models.CharField(\n max_length=500,\n blank=True,\n help_text=_(\"Value of the identifier used on the source to reference the object.\"),\n )\n\n external_url = models.URLField(\n max_length=1024,\n blank=True,\n help_text=_(\"A URL to the component, or component metadata, in the external source.\"),\n )\n\n objects = ExternalReferenceManager()\n\n class Meta:\n unique_together = (\"dataspace\", \"uuid\")\n ordering = [\"external_source\", \"external_id\"]\n\n def __str__(self):\n return f\"{self.external_source}: {self.external_id}\"\n\n def save(self, *args, **kwargs):\n self.dataspace = self.content_object.dataspace\n super().save(*args, **kwargs)"
},
{
"identifier": "History",
"path": "dje/models.py",
"snippet": "class History(models.Model):\n ADDITION = ADDITION\n CHANGE = CHANGE\n DELETION = DELETION\n\n ACTION_FLAG_CHOICES = (\n (ADDITION, _(\"Addition\")),\n (CHANGE, _(\"Change\")),\n (DELETION, _(\"Deletion\")),\n )\n\n object_dataspace = models.ForeignKey(\n to=\"dje.Dataspace\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n editable=False,\n )\n\n serialized_data = models.TextField(\n null=True,\n blank=True,\n editable=False,\n help_text=_(\"Serialized data of the instance just before this change.\"),\n )\n\n # The following fields are directly taken from django.contrib.admin.models.LogEntry\n # Since the LogEntry is not abstract we cannot properly inherit from it.\n\n action_time = models.DateTimeField(\n _(\"action time\"),\n default=timezone.now,\n editable=False,\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n models.CASCADE,\n verbose_name=_(\"user\"),\n )\n\n content_type = models.ForeignKey(\n ContentType,\n models.SET_NULL,\n verbose_name=_(\"content type\"),\n blank=True,\n null=True,\n )\n\n object_id = models.TextField(\n _(\"object id\"),\n blank=True,\n null=True,\n )\n\n object_repr = models.CharField(\n _(\"object repr\"),\n max_length=200,\n )\n\n action_flag = models.PositiveSmallIntegerField(\n _(\"action flag\"),\n choices=ACTION_FLAG_CHOICES,\n )\n\n # change_message is either a string or a JSON structure\n change_message = models.TextField(\n _(\"change message\"),\n blank=True,\n )\n\n objects = HistoryManager()\n\n class Meta:\n verbose_name = _(\"history entry\")\n verbose_name_plural = _(\"history entries\")\n ordering = (\"-action_time\",)\n\n # Clone the method from Django's LogEntry model.\n __repr__ = LogEntry.__repr__\n __str__ = LogEntry.__str__\n is_addition = LogEntry.is_addition\n is_change = LogEntry.is_change\n is_deletion = LogEntry.is_deletion\n get_change_message = LogEntry.get_change_message\n get_edited_object = LogEntry.get_edited_object\n get_admin_url = LogEntry.get_edited_object\n\n @classmethod\n def log_addition(cls, user, obj, message=None):\n \"\"\"Create History entry on Addition with the proper `change_message`.\"\"\"\n if not message:\n message = [{\"added\": {}}]\n\n return cls.objects.log_action(user, obj, cls.ADDITION, message)\n\n @classmethod\n def log_change(cls, user, obj, message, serialized_data=None):\n \"\"\"Create History entry on Change.\"\"\"\n return cls.objects.log_action(user, obj, cls.CHANGE, message, serialized_data)\n\n @classmethod\n def log_deletion(cls, user, obj):\n \"\"\"\n Create History entry on Deletion.\n Include the serialized_data if `as_json()` is available on the model class.\n \"\"\"\n serialized_data = None\n with suppress(AttributeError):\n serialized_data = obj.as_json()\n\n return cls.objects.log_action(user, obj, cls.DELETION, serialized_data=serialized_data)"
},
{
"identifier": "is_content_type_related",
"path": "dje/models.py",
"snippet": "def is_content_type_related(model_class):\n \"\"\"\n Return True if the given model_class has a ForeignKey field related to\n the ContentType model.\n \"\"\"\n return any(\n 1\n for field in model_class._meta.get_fields()\n if field.many_to_one and field.related_model == ContentType\n )"
},
{
"identifier": "is_secured",
"path": "dje/models.py",
"snippet": "def is_secured(manager):\n \"\"\"Return True if the `is_secured` attribute is set to True.\"\"\"\n if not issubclass(manager.__class__, models.Manager):\n raise AssertionError\n return getattr(manager, \"is_secured\", False)"
},
{
"identifier": "send_notification_email",
"path": "dje/notification.py",
"snippet": "def send_notification_email(user, instance, action, message=\"\"):\n if not has_email_settings():\n return\n\n if not hasattr(instance, \"dataspace\"):\n return\n\n recipients = get_user_model().objects.get_data_update_recipients(instance.dataspace)\n if not recipients:\n return\n\n verbose_name = instance._meta.verbose_name.capitalize()\n verbose_action = VERBOSE_ACTION[action]\n subject = f'{verbose_action} {verbose_name}: \"{instance}\"'\n body = (\n f'{verbose_name} \"{instance}\" in dataspace \"{instance.dataspace.name}\" '\n f\"{verbose_action.lower()} by: {user.first_name} {user.last_name} ({user.username})\"\n )\n\n if action is History.CHANGE and message:\n if message == \"No fields changed.\":\n return\n body += f\"\\n\\n{message}\"\n\n if action is not History.DELETION and settings.SITE_URL:\n site_url = settings.SITE_URL.rstrip(\"/\")\n body += f\"\\n\\n{site_url}{instance.get_admin_url()}\"\n\n send_mail_task.delay(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)"
},
{
"identifier": "get_authorized_tabs",
"path": "dje/permissions.py",
"snippet": "def get_authorized_tabs(model_class, user):\n \"\"\"\n Return the authorized tabs for this `user` for the given `model_class` .\n Tab availability is always driven by the current user Dataspace.\n A `is_superuser` user can see all the tabs.\n The authorizations are based on Permissions Groups and defined in the\n DataspaceConfiguration.tab_permissions field.\n Each Groups defines a list of authorized tabs.\n Empty string is used to define authorizations for Users without any assigned Groups.\n In case multiple Groups are assigned to the User, all the authorized tabs are merged.\n \"\"\"\n if user.is_superuser:\n return\n\n tab_permissions = user.dataspace.get_configuration(\"tab_permissions\")\n # The authorization feature is not enabled for the current user dataspace\n if not tab_permissions:\n return\n\n model_name = model_class._meta.model_name\n user_groups = user.get_group_names() or [\"\"]\n authorized_tabs = set()\n for group_name in user_groups:\n group_tabs = tab_permissions.get(group_name, {}).get(model_name, [])\n authorized_tabs.update(group_tabs)\n\n return list(authorized_tabs) or [None]"
},
{
"identifier": "get_protected_fields",
"path": "dje/permissions.py",
"snippet": "def get_protected_fields(model_class, user):\n \"\"\"Return the list of protected fields names for the given `user`.\"\"\"\n protected_fields = getattr(model_class(), \"permission_protected_fields\", {})\n\n return [\n field_name\n for field_name, perm_codename in protected_fields.items()\n if not user.has_perm(f\"{model_class._meta.app_label}.{perm_codename}\")\n ]"
},
{
"identifier": "get_tabset_for_model",
"path": "dje/permissions.py",
"snippet": "def get_tabset_for_model(model_class):\n \"\"\"Return the tabset content for the given `model_class`.\"\"\"\n return get_all_tabsets().get(model_class._meta.verbose_name)"
},
{
"identifier": "construct_changes_details_message",
"path": "dje/utils.py",
"snippet": "def construct_changes_details_message(changes_details):\n msg = []\n header = '\\n\\n\\nChanges details for {model_class} \"{instance}\"'\n change_line = \"\\n\\n* {field}\\nOld value: {old}\\nNew value: {new}\"\n\n for instance, data in changes_details.items():\n msg.append(header.format(model_class=instance.__class__.__name__, instance=instance))\n for field, old, new in data:\n msg.append(change_line.format(field=field, old=old, new=new))\n return \"\".join(msg)"
},
{
"identifier": "extract_name_version",
"path": "dje/utils.py",
"snippet": "def extract_name_version(name_version_str):\n \"\"\"\n Return a name and a version extracted from the following syntax: 'name:version'\n Note that colons `:` characters are allowed in the name but not in the version.\n \"\"\"\n if not name_version_str or \":\" not in name_version_str:\n raise SyntaxError\n\n name, _, version = name_version_str.rpartition(\":\")\n return name, version"
},
{
"identifier": "has_permission",
"path": "dje/utils.py",
"snippet": "def has_permission(model, user, action):\n \"\"\"Return True is the `user` has the Permission for the given action of the model.\"\"\"\n opts = model._meta\n codename = get_permission_codename(action, opts)\n return user.has_perm(f\"{opts.app_label}.{codename}\")"
},
{
"identifier": "set_intermediate_explicit_m2m",
"path": "dje/utils.py",
"snippet": "def set_intermediate_explicit_m2m(instance, field, value):\n \"\"\"\n Deal with m2m with explicit intermediate through relation.\n Using get_or_create to avoid create duplicate entries.\n Warning: This will fail if required fields (except the 2 FKs) are defined\n on the intermediary model.\n \"\"\"\n for related_instance in value:\n field.remote_field.through.objects.get_or_create(\n **{\n field.m2m_field_name(): instance,\n field.m2m_reverse_field_name(): related_instance,\n \"dataspace\": instance.dataspace,\n }\n )"
}
] | import uuid
import django_filters
from contextlib import suppress
from urllib.parse import urlparse
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import IntegrityError
from django.db import models
from django.urls import Resolver404
from django.urls import get_script_prefix
from django.urls import resolve
from django.utils.encoding import uri_to_iri
from django.utils.text import get_text_list
from django.utils.translation import gettext as _
from django_filters.rest_framework import FilterSet
from rest_framework import mixins
from rest_framework import serializers
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError as DRFValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import SAFE_METHODS
from rest_framework.relations import ManyRelatedField
from rest_framework.response import Response
from dje.api_custom import TabPermission
from dje.copier import copy_object
from dje.fields import ExtendedNullBooleanSelect
from dje.filters import LastModifiedDateFilter
from dje.filters import MultipleUUIDFilter
from dje.models import Dataspace
from dje.models import ExternalReference
from dje.models import History
from dje.models import is_content_type_related
from dje.models import is_secured
from dje.notification import send_notification_email
from dje.permissions import get_authorized_tabs
from dje.permissions import get_protected_fields
from dje.permissions import get_tabset_for_model
from dje.utils import construct_changes_details_message
from dje.utils import extract_name_version
from dje.utils import has_permission
from dje.utils import set_intermediate_explicit_m2m | 8,436 | for field_name, new_value in serializer.validated_data.items():
original_value = getattr(serializer.instance, field_name, None)
if new_value != original_value:
changed_data.append(field_name)
changes_details.append((field_name, original_value, new_value))
fields_name = [field.name for field in serializer.Meta.model._meta.get_fields()]
kwargs = {}
if "last_modified_by" in fields_name:
kwargs["last_modified_by"] = user
serialized_data = None
with suppress(AttributeError):
serialized_data = serializer.instance.as_json()
serializer.save(**kwargs)
if changed_data:
change_message = [_("Changed {}.").format(get_text_list(changed_data, _("and")))]
change_message = " ".join(change_message)
else:
change_message = _("No fields changed.")
History.log_change(user, serializer.instance, change_message, serialized_data)
if History.CHANGE in self.email_notification_on:
change_message += construct_changes_details_message(
{serializer.instance: changes_details}
)
send_notification_email(user, serializer.instance, History.CHANGE, change_message)
class ExtraPermissionsViewSetMixin:
def get_permissions(self):
permission_classes = super().get_permissions()
extra_permission = [permission() for permission in self.extra_permissions]
return permission_classes + extra_permission
class DataspacedSerializer(serializers.HyperlinkedModelSerializer):
def __init__(self, *args, **kwargs):
"""
Add the `dataspace` attribute from the request User Dataspace.
Required at save time and for validation.
"""
super().__init__(*args, **kwargs)
request = self.context.get("request", None)
self.dataspace = request.user.dataspace if request else None
def save(self, **kwargs):
"""
        Add the current user dataspace to the object data and
        wrap any IntegrityError in a proper DRFValidationError.
        Start by popping the m2m data before the actual save(),
        then set the m2m relations post save().
"""
# Pops the m2m data from the validated_data dict before save()
m2m_data = {
f: self._validated_data.pop(f.name)
for f in self.Meta.model._meta.get_fields()
if f.many_to_many and not f.auto_created and f.name in self._validated_data
}
if "uuid" in self.validated_data and not self.validated_data.get("uuid"):
kwargs.update({"uuid": uuid.uuid4()})
# Update the uuid in the view kwargs to allow a proper `get_object()` post update
updated_uuid = self.validated_data.get("uuid")
if updated_uuid:
self.context["view"].kwargs["uuid"] = updated_uuid
kwargs.update({"dataspace": self.dataspace})
try:
instance = super().save(**kwargs)
except (IntegrityError, DjangoValidationError) as e:
raise DRFValidationError(str(e))
for field, data in m2m_data.items():
set_intermediate_explicit_m2m(instance, field, data)
return instance
def validate(self, attrs):
"""Add the uniqueness validation calling the logic from Model.clean()."""
# Make a copy of the attrs and Remove the m2m values,
# since those cannot be part of the clean()
attrs_copy = attrs.copy()
for f in self.Meta.model._meta.get_fields():
if f.many_to_many and not f.auto_created:
attrs_copy.pop(f.name, None)
if isinstance(f, models.ManyToOneRel):
attrs_copy.pop(f.get_accessor_name(), None)
for field_name in getattr(self.Meta, "exclude_from_validate", []):
attrs_copy.pop(field_name, None)
instance = self.Meta.model(**attrs_copy)
instance.dataspace = self.dataspace
# Set the id from the `instance` to handle create vs. edit in Model.`clean()`
with suppress(AttributeError):
instance.id = self.instance.id
instance.clean(from_api=True)
return attrs
def get_fields(self):
"""Enable to override the UUID field. Also enabled the field level permissions."""
fields = super().get_fields()
if "uuid" in fields:
fields["uuid"].read_only = False
fields["uuid"].allow_null = True
request = self.context.get("request", None)
if request:
fields = self.apply_tabs_permission(fields, request.user)
| #
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#
REFERENCE_VAR = "reference"
class CreateRetrieveUpdateListViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet,
):
"""Provide default `create`, `retrieve`, `update`, `partial_update`, and `list` actions."""
email_notification_on = []
allow_reference_access = False
def get_queryset(self):
"""
        Allow access to the reference data if `self.allow_reference_access` is True.
The `REFERENCE_VAR` needs to be provided as a GET parameter `?reference=1` in the URL.
The special value `combine` can be provided as value for the `reference` parameter,
`?reference=combine`, to return records from both the user dataspace and the reference
one.
        The special `merge` value will include the reference records, excluding the
        objects whose `uuid` is already present in the user dataspace.
The reference data access is only available on `SAFE_METHODS` ('GET', 'HEAD', 'OPTIONS').
"""
user_dataspace = self.request.user.dataspace
base_qs = super().get_queryset()
user_qs = base_qs.scope(user_dataspace)
reference_params_value = self.request.GET.get(REFERENCE_VAR)
reference_access = all(
[
self.allow_reference_access,
reference_params_value,
self.request.method in SAFE_METHODS,
]
)
if not reference_access:
return user_qs
reference_dataspace = Dataspace.objects.get_reference()
if not reference_dataspace:
return user_qs
if reference_params_value not in ["combine", "merge"]:
reference_qs = base_qs.scope(reference_dataspace)
return reference_qs
combined_qs = base_qs.scope(user_dataspace, include_reference=True)
if reference_params_value == "merge":
return combined_qs.exclude(
uuid__in=models.Subquery(user_qs.values("uuid")),
dataspace=reference_dataspace,
)
return combined_qs
@action(detail=True, methods=["post"])
def copy_to_my_dataspace(self, request, uuid):
reference_dataspace = Dataspace.objects.get_reference()
permission_error = {"error": "You do not have rights to execute this action."}
reference_access = all(
[
self.allow_reference_access,
reference_dataspace,
]
)
if not reference_access:
return Response(permission_error, status=status.HTTP_400_BAD_REQUEST)
queryset = self.queryset.scope(reference_dataspace)
reference_object = get_object_or_404(queryset, uuid=uuid)
user = request.user
target_dataspace = user.dataspace
model_class = reference_object.__class__
if not has_permission(reference_object, user, "add"):
return Response(permission_error, status=status.HTTP_400_BAD_REQUEST)
if target_dataspace.is_reference:
data = {"error": "Target dataspace cannot be the reference one."}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
object_exists_in_target_dataspace = (
model_class._default_manager.scope(target_dataspace)
.filter(uuid=reference_object.uuid)
.exists()
)
if object_exists_in_target_dataspace:
data = {"error": "The object already exists in your local Dataspace."}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
copied_object = copy_object(reference_object, target_dataspace, user)
if not copied_object:
data = {"error": "The object could not be copied."}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
serializer = self.get_serializer(copied_object)
return Response(serializer.data)
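    # Editorial note (not part of the original source; the URL shown is hypothetical):
    # being a DRF detail action, this is exposed by the router as
    #   POST /api/v2/<endpoint>/<uuid>/copy_to_my_dataspace/
    # and copies the reference object into the requesting user's dataspace.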
def perform_create(self, serializer):
"""Add the Addition History."""
user = self.request.user
fields_name = [field.name for field in serializer.Meta.model._meta.get_fields()]
kwargs = {}
if "created_by" in fields_name:
kwargs["created_by"] = user
if "last_modified_by" in fields_name:
kwargs["last_modified_by"] = user
serializer.save(**kwargs)
History.log_addition(user, serializer.instance)
if History.ADDITION in self.email_notification_on:
send_notification_email(user, serializer.instance, History.ADDITION)
def perform_update(self, serializer):
"""Add the CHANGE History."""
changed_data = []
changes_details = []
user = self.request.user
for field_name, new_value in serializer.validated_data.items():
original_value = getattr(serializer.instance, field_name, None)
if new_value != original_value:
changed_data.append(field_name)
changes_details.append((field_name, original_value, new_value))
fields_name = [field.name for field in serializer.Meta.model._meta.get_fields()]
kwargs = {}
if "last_modified_by" in fields_name:
kwargs["last_modified_by"] = user
serialized_data = None
with suppress(AttributeError):
serialized_data = serializer.instance.as_json()
serializer.save(**kwargs)
if changed_data:
change_message = [_("Changed {}.").format(get_text_list(changed_data, _("and")))]
change_message = " ".join(change_message)
else:
change_message = _("No fields changed.")
History.log_change(user, serializer.instance, change_message, serialized_data)
if History.CHANGE in self.email_notification_on:
change_message += construct_changes_details_message(
{serializer.instance: changes_details}
)
send_notification_email(user, serializer.instance, History.CHANGE, change_message)
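# Editorial sketch (not part of the original module; `License`, `LicenseSerializer`
# and `LicenseFilterSet` are hypothetical names): a concrete API endpoint would
# typically subclass the viewset above along these lines:
#
#   class LicenseViewSet(CreateRetrieveUpdateListViewSet):
#       queryset = License.objects.all()
#       serializer_class = LicenseSerializer
#       filterset_class = LicenseFilterSet
#       lookup_field = "uuid"
#       allow_reference_access = True
#       email_notification_on = [History.ADDITION, History.CHANGE]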
class ExtraPermissionsViewSetMixin:
def get_permissions(self):
permission_classes = super().get_permissions()
extra_permission = [permission() for permission in self.extra_permissions]
return permission_classes + extra_permission
class DataspacedSerializer(serializers.HyperlinkedModelSerializer):
def __init__(self, *args, **kwargs):
"""
Add the `dataspace` attribute from the request User Dataspace.
Required at save time and for validation.
"""
super().__init__(*args, **kwargs)
request = self.context.get("request", None)
self.dataspace = request.user.dataspace if request else None
def save(self, **kwargs):
"""
        Add the current user dataspace to the object data and
        wrap any IntegrityError in a proper DRFValidationError.
        Start by popping the m2m data before the actual save(),
        then set the m2m relations post save().
"""
# Pops the m2m data from the validated_data dict before save()
m2m_data = {
f: self._validated_data.pop(f.name)
for f in self.Meta.model._meta.get_fields()
if f.many_to_many and not f.auto_created and f.name in self._validated_data
}
if "uuid" in self.validated_data and not self.validated_data.get("uuid"):
kwargs.update({"uuid": uuid.uuid4()})
# Update the uuid in the view kwargs to allow a proper `get_object()` post update
updated_uuid = self.validated_data.get("uuid")
if updated_uuid:
self.context["view"].kwargs["uuid"] = updated_uuid
kwargs.update({"dataspace": self.dataspace})
try:
instance = super().save(**kwargs)
except (IntegrityError, DjangoValidationError) as e:
raise DRFValidationError(str(e))
for field, data in m2m_data.items():
set_intermediate_explicit_m2m(instance, field, data)
return instance
def validate(self, attrs):
"""Add the uniqueness validation calling the logic from Model.clean()."""
# Make a copy of the attrs and Remove the m2m values,
# since those cannot be part of the clean()
attrs_copy = attrs.copy()
for f in self.Meta.model._meta.get_fields():
if f.many_to_many and not f.auto_created:
attrs_copy.pop(f.name, None)
if isinstance(f, models.ManyToOneRel):
attrs_copy.pop(f.get_accessor_name(), None)
for field_name in getattr(self.Meta, "exclude_from_validate", []):
attrs_copy.pop(field_name, None)
instance = self.Meta.model(**attrs_copy)
instance.dataspace = self.dataspace
# Set the id from the `instance` to handle create vs. edit in Model.`clean()`
with suppress(AttributeError):
instance.id = self.instance.id
instance.clean(from_api=True)
return attrs
def get_fields(self):
"""Enable to override the UUID field. Also enabled the field level permissions."""
fields = super().get_fields()
if "uuid" in fields:
fields["uuid"].read_only = False
fields["uuid"].allow_null = True
request = self.context.get("request", None)
if request:
fields = self.apply_tabs_permission(fields, request.user)
| protected_fields = get_protected_fields(self.Meta.model, request.user) | 12 | 2023-12-07 16:57:42+00:00 | 12k |
wusize/CLIM | src/open_clip/eva_clip/factory.py | [
{
"identifier": "OPENAI_DATASET_MEAN",
"path": "src/open_clip/eva_clip/constants.py",
"snippet": "OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)"
},
{
"identifier": "OPENAI_DATASET_STD",
"path": "src/open_clip/eva_clip/constants.py",
"snippet": "OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)"
},
{
"identifier": "CLIP",
"path": "src/open_clip/eva_clip/model.py",
"snippet": "class CLIP(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n ):\n super().__init__()\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n\n text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n self.transformer = text.transformer\n self.embed_dim = embed_dim\n self.vocab_size = text.vocab_size\n self.token_embedding = text.token_embedding\n self.positional_embedding = text.positional_embedding\n self.ln_final = text.ln_final\n self.text_projection = text.text_projection\n self.register_buffer('attn_mask', text.attn_mask, persistent=False)\n\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):\n # lock image tower as per LiT - https://arxiv.org/abs/2111.07991\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.transformer.grad_checkpointing = enable\n \n @torch.jit.ignore\n def no_weight_decay(self):\n return {'logit_scale'}\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_text(self, text, normalize: bool = False):\n cast_dtype = self.transformer.get_cast_dtype()\n\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding.to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=self.attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n return F.normalize(x, dim=-1) if normalize else x\n\n def forward(self, image, text):\n image_features = self.encode_image(image, normalize=True)\n text_features = self.encode_text(text, normalize=True)\n return image_features, text_features, self.logit_scale.exp()"
},
{
"identifier": "CustomCLIP",
"path": "src/open_clip/eva_clip/model.py",
"snippet": "class CustomCLIP(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n itm_task: bool = False,\n ):\n super().__init__()\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n self.embed_dim = embed_dim\n print(f'Freeze text encoder parameters', flush=True)\n for param in self.text.parameters():\n param.requires_grad = False\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def train(self, mode=True):\n super().train(mode)\n self.text.train(mode=False)\n return self\n\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False, **kwargs):\n # lock image tower as per LiT - https://arxiv.org/abs/2111.07991\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n def lock_text_tower(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):\n self.text.lock(unlocked_layers, freeze_layer_norm)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.text.set_grad_checkpointing(enable)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'logit_scale'}\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_text(self, text, normalize: bool = False):\n features = self.text(text)\n return F.normalize(features, dim=-1) if normalize else features\n\n def forward(self, image, text):\n image_features = self.encode_image(image, normalize=True)\n text_features = self.encode_text(text, normalize=True)\n return image_features, text_features, self.logit_scale.exp()\n\n def encode_dense(self, image, normalize: bool = False, keep_shape=False):\n features = self.visual.encode_dense(image, keep_shape=keep_shape)\n if normalize:\n if keep_shape:\n features = F.normalize(features, dim=1)\n else:\n features = F.normalize(features, dim=-1)\n return features\n\n def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False,\n extract_type='v1'):\n features = self.visual.extract_roi_features(image, normed_boxes, extract_type=extract_type)\n if normalize:\n features = F.normalize(features, dim=-1)\n return features\n\n def encode_masks(self, image, masks, normalize=True, mask_attn=False):\n mask_pooled = self.visual.mask_pool(image, masks)\n if normalize:\n mask_pooled = F.normalize(mask_pooled, dim=-1)\n return mask_pooled"
},
{
"identifier": "convert_weights_to_lp",
"path": "src/open_clip/eva_clip/model.py",
"snippet": "def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):\n \"\"\"Convert applicable model parameters to low-precision (bf16 or fp16)\"\"\"\n\n def _convert_weights(l):\n \n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.to(dtype)\n if l.bias is not None:\n l.bias.data = l.bias.data.to(dtype)\n\n if isinstance(l, (nn.MultiheadAttention, Attention)):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr, None)\n if tensor is not None:\n tensor.data = tensor.data.to(dtype)\n\n if isinstance(l, nn.Parameter):\n l.data = l.data.to(dtype)\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name) and isinstance(l, nn.Parameter):\n attr = getattr(l, name, None)\n if attr is not None:\n attr.data = attr.data.to(dtype)\n\n model.apply(_convert_weights)"
},
{
"identifier": "convert_to_custom_text_state_dict",
"path": "src/open_clip/eva_clip/model.py",
"snippet": "def convert_to_custom_text_state_dict(state_dict: dict):\n if 'text_projection' in state_dict:\n # old format state_dict, move text tower -> .text\n new_state_dict = {}\n for k, v in state_dict.items():\n if any(k.startswith(p) for p in (\n 'text_projection',\n 'positional_embedding',\n 'token_embedding',\n 'transformer',\n 'ln_final',\n 'logit_scale'\n )):\n k = 'text.' + k\n new_state_dict[k] = v\n return new_state_dict\n return state_dict"
},
{
"identifier": "get_cast_dtype",
"path": "src/open_clip/eva_clip/model.py",
"snippet": "def get_cast_dtype(precision: str):\n cast_dtype = None\n if precision == 'bf16':\n cast_dtype = torch.bfloat16\n elif precision == 'fp16':\n cast_dtype = torch.float16\n return cast_dtype"
},
{
"identifier": "load_openai_model",
"path": "src/open_clip/eva_clip/openai.py",
"snippet": "def load_openai_model(\n name: str,\n precision: Optional[str] = None,\n device: Optional[Union[str, torch.device]] = None,\n jit: bool = True,\n cache_dir: Optional[str] = None,\n):\n \"\"\"Load a CLIP model\n\n Parameters\n ----------\n name : str\n A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict\n precision: str\n Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.\n device : Union[str, torch.device]\n The device to put the loaded model\n jit : bool\n Whether to load the optimized JIT model (default) or more hackable non-JIT model.\n cache_dir : Optional[str]\n The directory to cache the downloaded model weights\n\n Returns\n -------\n model : torch.nn.Module\n The CLIP model\n preprocess : Callable[[PIL.Image], torch.Tensor]\n A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input\n \"\"\"\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if precision is None:\n precision = 'fp32' if device == 'cpu' else 'fp16'\n\n if get_pretrained_url(name, 'openai'):\n model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)\n elif os.path.isfile(name):\n model_path = name\n else:\n raise RuntimeError(f\"Model {name} not found; available models = {list_openai_models()}\")\n\n try:\n # loading JIT archive\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n state_dict = None\n except RuntimeError:\n # loading saved state dict\n if jit:\n warnings.warn(f\"File {model_path} is not a JIT archive. Loading as a state dict instead\")\n jit = False\n state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if not jit:\n # Build a non-jit model from the OpenAI jitted model state dict\n cast_dtype = get_cast_dtype(precision)\n try:\n model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)\n except KeyError:\n sd = {k[7:]: v for k, v in state_dict[\"state_dict\"].items()}\n model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)\n\n # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use\n model = model.to(device)\n if precision.startswith('amp') or precision == 'fp32':\n model.float()\n elif precision == 'bf16':\n convert_weights_to_lp(model, dtype=torch.bfloat16)\n\n return model\n\n # patch the device names\n device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])\n device_node = [n for n in device_holder.graph.findAllNodes(\"prim::Constant\") if \"Device\" in repr(n)][-1]\n\n def patch_device(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\"cuda\"):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_image)\n patch_device(model.encode_text)\n\n # patch dtype to float32 (typically for CPU)\n if precision == 'fp32':\n float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n try:\n graphs = 
[module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [1, 2]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n model.apply(patch_float)\n patch_float(model.encode_image)\n patch_float(model.encode_text)\n model.float()\n\n # ensure image_size attr available at consistent location for both jit and non-jit\n model.visual.image_size = model.input_resolution.item()\n return model"
},
{
"identifier": "is_pretrained_cfg",
"path": "src/open_clip/eva_clip/pretrained.py",
"snippet": "def is_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return False\n return _clean_tag(tag) in _PRETRAINED[model]"
},
{
"identifier": "get_pretrained_cfg",
"path": "src/open_clip/eva_clip/pretrained.py",
"snippet": "def get_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return {}\n model_pretrained = _PRETRAINED[model]\n return model_pretrained.get(_clean_tag(tag), {})"
},
{
"identifier": "download_pretrained",
"path": "src/open_clip/eva_clip/pretrained.py",
"snippet": "def download_pretrained(\n cfg: Dict,\n force_hf_hub: bool = False,\n cache_dir: Union[str, None] = None,\n):\n target = ''\n if not cfg:\n return target\n\n download_url = cfg.get('url', '')\n download_hf_hub = cfg.get('hf_hub', '')\n if download_hf_hub and force_hf_hub:\n # use HF hub even if url exists\n download_url = ''\n\n if download_url:\n target = download_pretrained_from_url(download_url, cache_dir=cache_dir)\n elif download_hf_hub:\n has_hf_hub(True)\n # we assume the hf_hub entries in pretrained config combine model_id + filename in\n # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and\n # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.\n model_id, filename = os.path.split(download_hf_hub)\n if filename:\n target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)\n else:\n target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)\n\n return target"
},
{
"identifier": "list_pretrained_tags_by_model",
"path": "src/open_clip/eva_clip/pretrained.py",
"snippet": "def list_pretrained_tags_by_model(model: str):\n \"\"\" return all pretrain tags for the specified model architecture \"\"\"\n tags = []\n if model in _PRETRAINED:\n tags.extend(_PRETRAINED[model].keys())\n return tags"
},
{
"identifier": "image_transform",
"path": "src/open_clip/eva_clip/transform.py",
"snippet": "def image_transform(\n image_size: int,\n is_train: bool,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n resize_longest_max: bool = False,\n fill_color: int = 0,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n return Compose([\n RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n else:\n if resize_longest_max:\n transforms = [\n ResizeMaxSize(image_size, fill=fill_color)\n ]\n else:\n transforms = [\n Resize(image_size, interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ]\n transforms.extend([\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n return Compose(transforms)"
},
{
"identifier": "HFTokenizer",
"path": "src/open_clip/eva_clip/tokenizer.py",
"snippet": "class HFTokenizer:\n \"HuggingFace tokenizer wrapper\"\n def __init__(self, tokenizer_name:str):\n from transformers import AutoTokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n\n def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor:\n # same cleaning as for default tokenizer, except lowercasing\n # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance\n if isinstance(texts, str):\n texts = [texts]\n texts = [whitespace_clean(basic_clean(text)) for text in texts]\n input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids\n return input_ids"
},
{
"identifier": "tokenize",
"path": "src/open_clip/eva_clip/tokenizer.py",
"snippet": "def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:\n \"\"\"\n Returns the tokenized representation of given input string(s)\n\n Parameters\n ----------\n texts : Union[str, List[str]]\n An input string or a list of input strings to tokenize\n context_length : int\n The context length to use; all CLIP models use 77 as the context length\n\n Returns\n -------\n A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]\n \"\"\"\n if isinstance(texts, str):\n texts = [texts]\n\n sot_token = _tokenizer.encoder[\"<start_of_text>\"]\n eot_token = _tokenizer.encoder[\"<end_of_text>\"]\n all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]\n result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)\n\n for i, tokens in enumerate(all_tokens):\n if len(tokens) > context_length:\n tokens = tokens[:context_length] # Truncate\n tokens[-1] = eot_token\n result[i, :len(tokens)] = torch.tensor(tokens)\n\n return result"
},
{
"identifier": "resize_clip_pos_embed",
"path": "src/open_clip/eva_clip/utils.py",
"snippet": "def resize_clip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):\n # Rescale the grid of position embeddings when loading from state_dict\n old_pos_embed = state_dict.get('visual.positional_embedding', None)\n if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):\n return\n grid_size = to_2tuple(model.visual.grid_size)\n extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)\n new_seq_len = grid_size[0] * grid_size[1] + extra_tokens\n if new_seq_len == old_pos_embed.shape[0]:\n return\n\n if extra_tokens:\n pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]\n else:\n pos_emb_tok, pos_emb_img = None, old_pos_embed\n old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))\n\n logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)\n pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)\n pos_emb_img = F.interpolate(\n pos_emb_img,\n size=grid_size,\n mode=interpolation,\n align_corners=True,\n )\n pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]\n if pos_emb_tok is not None:\n new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)\n else:\n new_pos_embed = pos_emb_img\n state_dict['visual.positional_embedding'] = new_pos_embed"
},
{
"identifier": "resize_evaclip_pos_embed",
"path": "src/open_clip/eva_clip/utils.py",
"snippet": "def resize_evaclip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):\n all_keys = list(state_dict.keys())\n # interpolate position embedding\n if 'visual.pos_embed' in state_dict:\n pos_embed_checkpoint = state_dict['visual.pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.visual.patch_embed.num_patches\n num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n state_dict['visual.pos_embed'] = new_pos_embed\n\n patch_embed_proj = state_dict['visual.patch_embed.proj.weight']\n patch_size = model.visual.patch_embed.patch_size\n state_dict['visual.patch_embed.proj.weight'] = torch.nn.functional.interpolate(\n patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)"
},
{
"identifier": "resize_visual_pos_embed",
"path": "src/open_clip/eva_clip/utils.py",
"snippet": "def resize_visual_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):\n # Rescale the grid of position embeddings when loading from state_dict\n old_pos_embed = state_dict.get('positional_embedding', None)\n if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):\n return\n grid_size = to_2tuple(model.visual.grid_size)\n extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)\n new_seq_len = grid_size[0] * grid_size[1] + extra_tokens\n if new_seq_len == old_pos_embed.shape[0]:\n return\n\n if extra_tokens:\n pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]\n else:\n pos_emb_tok, pos_emb_img = None, old_pos_embed\n old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))\n\n logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)\n pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)\n pos_emb_img = F.interpolate(\n pos_emb_img,\n size=grid_size,\n mode=interpolation,\n align_corners=True,\n )\n pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]\n if pos_emb_tok is not None:\n new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)\n else:\n new_pos_embed = pos_emb_img\n state_dict['positional_embedding'] = new_pos_embed"
},
{
"identifier": "resize_eva_pos_embed",
"path": "src/open_clip/eva_clip/utils.py",
"snippet": "def resize_eva_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):\n all_keys = list(state_dict.keys())\n # interpolate position embedding\n if 'pos_embed' in state_dict:\n pos_embed_checkpoint = state_dict['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.visual.patch_embed.num_patches\n num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n state_dict['pos_embed'] = new_pos_embed\n\n patch_embed_proj = state_dict['patch_embed.proj.weight']\n patch_size = model.visual.patch_embed.patch_size\n state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(\n patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)"
}
] | import json
import logging
import os
import pathlib
import re
import torch
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple, Union, Dict, Any
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
get_cast_dtype
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model
from .transform import image_transform
from .tokenizer import HFTokenizer, tokenize
from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed | 9,537 | device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
else:
model_cfg = get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if 'rope' in model_cfg.get('vision_cfg', {}):
if model_cfg['vision_cfg']['rope']:
os.environ['RoPE'] = "1"
else:
os.environ['RoPE'] = "0"
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg['vision_cfg']["patch_dropout"] = force_patch_dropout
cast_dtype = get_cast_dtype(precision)
custom_clip = model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg'])
if custom_clip:
if 'hf_model_name' in model_cfg.get('text_cfg', {}):
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
pretrained_cfg = {}
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model,
checkpoint_path,
model_key="model|module|state_dict",
strict=False
)
else:
error_str = (
f'Pretrained weights ({pretrained}) not found for model {model_name}. '
f'Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
logging.warning(error_str)
raise RuntimeError(error_str)
else:
visual_checkpoint_path = ''
text_checkpoint_path = ''
if pretrained_image:
pretrained_visual_model = pretrained_visual_model.replace('/', '-') # for callers using old naming with / in ViT names
pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
elif pretrained_image_cfg:
visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained_image):
visual_checkpoint_path = pretrained_image
else:
logging.warning(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
raise RuntimeError(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
if pretrained_text:
pretrained_text_model = pretrained_text_model.replace('/', '-') # for callers using old naming with / in ViT names
pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
if pretrained_text_cfg:
text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained_text):
text_checkpoint_path = pretrained_text
else:
logging.warning(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
raise RuntimeError(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
if visual_checkpoint_path:
logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).')
if text_checkpoint_path:
logging.info(f'Loading pretrained {model_name}.text weights ({text_checkpoint_path}).')
if visual_checkpoint_path or text_checkpoint_path:
load_pretrained_checkpoint(
model,
visual_checkpoint_path,
text_checkpoint_path,
strict=False,
visual_model=pretrained_visual_model,
text_model=pretrained_text_model,
model_key="model|module|state_dict",
skip_list=skip_list
)
if "fp16" in precision or "bf16" in precision:
logging.info(f'convert precision to {precision}')
model = model.to(torch.bfloat16) if 'bf16' in precision else model.to(torch.float16)
model.to(device=device)
# set image mean / std metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
|
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # dictionary (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, "r", encoding="utf8") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])))
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
config = get_model_config(model_name)
tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
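# Illustrative usage sketch for get_tokenizer() above (the model name is an assumed
# placeholder, not taken from this record):
#
#   tok = get_tokenizer("EVA02-CLIP-B-16")   # HFTokenizer if the config sets 'hf_tokenizer_name',
#                                            # otherwise the default `tokenize` function
#   ids = tok(["a photo of a cat"])          # -> torch.LongTensor of shape [1, 77]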
# load OpenAI CLIP weights when is_openai=True (used for training)
def load_state_dict(checkpoint_path: str, map_location: str='cpu', model_key: str='model|module|state_dict', is_openai: bool=False, skip_list: list=[]):
if is_openai:
model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
state_dict = model.state_dict()
for key in ["input_resolution", "context_length", "vocab_size"]:
state_dict.pop(key, None)
else:
checkpoint = torch.load(checkpoint_path, map_location=map_location)
for mk in model_key.split('|'):
if isinstance(checkpoint, dict) and mk in checkpoint:
state_dict = checkpoint[mk]
break
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
for k in skip_list:
if k in list(state_dict.keys()):
logging.info(f"Removing key {k} from pretrained checkpoint")
del state_dict[k]
if os.getenv('RoPE') == '1':
for k in list(state_dict.keys()):
if 'freqs_cos' in k or 'freqs_sin' in k:
del state_dict[k]
return state_dict
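# Usage sketch for load_state_dict() above (the checkpoint path is an assumed placeholder):
#
#   sd = load_state_dict("/path/to/checkpoint.pt", model_key="model|module|state_dict")
#
# The keys in `model_key` are tried in order ("model", then "module", then "state_dict");
# if none matches, the loaded checkpoint itself is used as the state dict. A leading
# "module." prefix (e.g. from DistributedDataParallel checkpoints) is stripped, and the
# rotary-embedding buffers (freqs_cos / freqs_sin) are dropped when the RoPE env var is "1".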
def load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=True):
state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
if 'text.logit_scale' in state_dict and hasattr(model, 'logit_scale'):
state_dict['logit_scale'] = state_dict['text.logit_scale']
del state_dict['text.logit_scale']
# resize_clip_pos_embed for CLIP and open CLIP
if 'visual.positional_embedding' in state_dict:
resize_clip_pos_embed(state_dict, model)
# specified to eva_vit_model
elif 'visual.pos_embed' in state_dict:
resize_evaclip_pos_embed(state_dict, model)
# resize_clip_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
logging.info(f"incompatible_keys.missing_keys: {incompatible_keys.missing_keys}")
return incompatible_keys
def load_clip_visual_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
for k in list(state_dict.keys()):
if not k.startswith('visual.'):
del state_dict[k]
for k in list(state_dict.keys()):
if k.startswith('visual.'):
new_k = k[7:]
state_dict[new_k] = state_dict[k]
del state_dict[k]
return state_dict
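# Example of the filtering performed above (keys are illustrative):
#
#   {"visual.conv1.weight": ..., "text_projection": ...}
#       -> {"conv1.weight": ...}
#
# Only "visual."-prefixed entries are kept, with the prefix stripped so the result can be
# loaded directly into `model.visual`.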
def load_clip_text_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
for k in list(state_dict.keys()):
if k.startswith('visual.'):
del state_dict[k]
return state_dict
def get_pretrained_tag(pretrained_model):
pretrained_model = pretrained_model.lower()
if "laion" in pretrained_model or "open_clip" in pretrained_model:
return "open_clip"
elif "openai" in pretrained_model:
return "clip"
elif "eva" in pretrained_model and "clip" in pretrained_model:
return "eva_clip"
else:
return "other"
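# Examples of the tag mapping above (input strings are illustrative):
#
#   "laion2b_s32b_b79k" -> "open_clip"
#   "openai" -> "clip"
#   "eva_clip_psz14" -> "eva_clip"
#   anything else -> "other"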
def load_pretrained_checkpoint(
model,
visual_checkpoint_path,
text_checkpoint_path,
strict=True,
visual_model=None,
text_model=None,
model_key="model|module|state_dict",
skip_list=[]):
visual_tag = get_pretrained_tag(visual_model)
text_tag = get_pretrained_tag(text_model)
logging.info(f"num of model state_dict keys: {len(model.state_dict().keys())}")
visual_incompatible_keys, text_incompatible_keys = None, None
if visual_checkpoint_path:
if visual_tag == "eva_clip" or visual_tag == "open_clip":
visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=False, skip_list=skip_list)
elif visual_tag == "clip":
visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=True, skip_list=skip_list)
else:
visual_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
# resize_clip_pos_embed for CLIP and open CLIP
if 'positional_embedding' in visual_state_dict:
resize_visual_pos_embed(visual_state_dict, model)
# specified to EVA model
elif 'pos_embed' in visual_state_dict:
resize_eva_pos_embed(visual_state_dict, model)
visual_incompatible_keys = model.visual.load_state_dict(visual_state_dict, strict=strict)
logging.info(f"num of loaded visual_state_dict keys: {len(visual_state_dict.keys())}")
logging.info(f"visual_incompatible_keys.missing_keys: {visual_incompatible_keys.missing_keys}")
if text_checkpoint_path:
if text_tag == "eva_clip" or text_tag == "open_clip":
text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=False, skip_list=skip_list)
elif text_tag == "clip":
text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=True, skip_list=skip_list)
else:
text_state_dict = load_state_dict(text_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
text_incompatible_keys = model.text.load_state_dict(text_state_dict, strict=strict)
logging.info(f"num of loaded text_state_dict keys: {len(text_state_dict.keys())}")
logging.info(f"text_incompatible_keys.missing_keys: {text_incompatible_keys.missing_keys}")
return visual_incompatible_keys, text_incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_clip: bool = False,
force_patch_dropout: Optional[float] = None,
pretrained_image: str = '',
pretrained_text: str = '',
pretrained_hf: bool = True,
pretrained_visual_model: str = None,
pretrained_text_model: str = None,
cache_dir: Optional[str] = None,
skip_list: list = [],
):
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
else:
model_cfg = get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if 'rope' in model_cfg.get('vision_cfg', {}):
if model_cfg['vision_cfg']['rope']:
os.environ['RoPE'] = "1"
else:
os.environ['RoPE'] = "0"
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg['vision_cfg']["patch_dropout"] = force_patch_dropout
cast_dtype = get_cast_dtype(precision)
custom_clip = model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg'])
if custom_clip:
if 'hf_model_name' in model_cfg.get('text_cfg', {}):
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
pretrained_cfg = {}
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model,
checkpoint_path,
model_key="model|module|state_dict",
strict=False
)
else:
error_str = (
f'Pretrained weights ({pretrained}) not found for model {model_name}. '
f'Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
logging.warning(error_str)
raise RuntimeError(error_str)
else:
visual_checkpoint_path = ''
text_checkpoint_path = ''
if pretrained_image:
pretrained_visual_model = pretrained_visual_model.replace('/', '-') # for callers using old naming with / in ViT names
pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
elif pretrained_image_cfg:
visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained_image):
visual_checkpoint_path = pretrained_image
else:
logging.warning(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
raise RuntimeError(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
if pretrained_text:
pretrained_text_model = pretrained_text_model.replace('/', '-') # for callers using old naming with / in ViT names
pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
if pretrained_text_cfg:
text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained_text):
text_checkpoint_path = pretrained_text
else:
logging.warning(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
raise RuntimeError(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
if visual_checkpoint_path:
logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).')
if text_checkpoint_path:
logging.info(f'Loading pretrained {model_name}.text weights ({text_checkpoint_path}).')
if visual_checkpoint_path or text_checkpoint_path:
load_pretrained_checkpoint(
model,
visual_checkpoint_path,
text_checkpoint_path,
strict=False,
visual_model=pretrained_visual_model,
text_model=pretrained_text_model,
model_key="model|module|state_dict",
skip_list=skip_list
)
if "fp16" in precision or "bf16" in precision:
logging.info(f'convert precision to {precision}')
model = model.to(torch.bfloat16) if 'bf16' in precision else model.to(torch.float16)
model.to(device=device)
# set image mean / std metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN | model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD | 1 | 2023-12-09 05:43:08+00:00 | 12k |
moonshot-admin/moonshot | third-party/pathspec-0.12.1/pathspec/gitignore.py | [
{
"identifier": "PathSpec",
"path": "third-party/pathspec-0.12.1/pathspec/pathspec.py",
"snippet": "class PathSpec(object):\n\t\"\"\"\n\tThe :class:`PathSpec` class is a wrapper around a list of compiled\n\t:class:`.Pattern` instances.\n\t\"\"\"\n\n\tdef __init__(self, patterns: Iterable[Pattern]) -> None:\n\t\t\"\"\"\n\t\tInitializes the :class:`PathSpec` instance.\n\n\t\t*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)\n\t\tyields each compiled pattern (:class:`.Pattern`).\n\t\t\"\"\"\n\t\tif not isinstance(patterns, CollectionType):\n\t\t\tpatterns = list(patterns)\n\n\t\tself.patterns: Collection[Pattern] = patterns\n\t\t\"\"\"\n\t\t*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)\n\t\tcontains the compiled patterns.\n\t\t\"\"\"\n\n\tdef __eq__(self, other: object) -> bool:\n\t\t\"\"\"\n\t\tTests the equality of this path-spec with *other* (:class:`PathSpec`)\n\t\tby comparing their :attr:`~PathSpec.patterns` attributes.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\tpaired_patterns = zip_longest(self.patterns, other.patterns)\n\t\t\treturn all(a == b for a, b in paired_patterns)\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef __len__(self) -> int:\n\t\t\"\"\"\n\t\tReturns the number of compiled patterns this path-spec contains\n\t\t(:class:`int`).\n\t\t\"\"\"\n\t\treturn len(self.patterns)\n\n\tdef __add__(self: Self, other: \"PathSpec\") -> Self:\n\t\t\"\"\"\n\t\tCombines the :attr:`Pathspec.patterns` patterns from two\n\t\t:class:`PathSpec` instances.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\treturn self.__class__(self.patterns + other.patterns)\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef __iadd__(self: Self, other: \"PathSpec\") -> Self:\n\t\t\"\"\"\n\t\tAdds the :attr:`Pathspec.patterns` patterns from one :class:`PathSpec`\n\t\tinstance to this instance.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\tself.patterns += other.patterns\n\t\t\treturn self\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef check_file(\n\t\tself,\n\t\tfile: TStrPath,\n\t\tseparators: Optional[Collection[str]] = None,\n\t) -> CheckResult[TStrPath]:\n\t\t\"\"\"\n\t\tCheck the files against this path-spec.\n\n\t\t*file* (:class:`str` or :class:`os.PathLike`) is the file path to be\n\t\tmatched against :attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\tReturns the file check result (:class:`~pathspec.util.CheckResult`).\n\t\t\"\"\"\n\t\tnorm_file = normalize_file(file, separators)\n\t\tinclude, index = self._match_file(enumerate(self.patterns), norm_file)\n\t\treturn CheckResult(file, include, index)\n\n\tdef check_files(\n\t\tself,\n\t\tfiles: Iterable[TStrPath],\n\t\tseparators: Optional[Collection[str]] = None,\n\t) -> Iterator[CheckResult[TStrPath]]:\n\t\t\"\"\"\n\t\tCheck the files against this path-spec.\n\n\t\t*files* (:class:`~collections.abc.Iterable` of :class:`str` or\n\t\t:class:`os.PathLike`) contains the file paths to be checked against\n\t\t:attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. 
See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\tReturns an :class:`~collections.abc.Iterator` yielding each file check\n\t\tresult (:class:`~pathspec.util.CheckResult`).\n\t\t\"\"\"\n\t\tif not _is_iterable(files):\n\t\t\traise TypeError(f\"files:{files!r} is not an iterable.\")\n\n\t\tuse_patterns = _filter_check_patterns(self.patterns)\n\t\tfor orig_file in files:\n\t\t\tnorm_file = normalize_file(orig_file, separators)\n\t\t\tinclude, index = self._match_file(use_patterns, norm_file)\n\t\t\tyield CheckResult(orig_file, include, index)\n\n\tdef check_tree_files(\n\t\tself,\n\t\troot: StrPath,\n\t\ton_error: Optional[Callable[[OSError], None]] = None,\n\t\tfollow_links: Optional[bool] = None,\n\t) -> Iterator[CheckResult[str]]:\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and checks them against this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str` or :class:`os.PathLike`) is the root directory to\n\t\tsearch for files.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally\n\t\tis the error handler for file-system exceptions. It will be called with the\n\t\texception (:exc:`OSError`). Reraise the exception to abort the walk. Default\n\t\tis :data:`None` to ignore file-system exceptions.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk\n\t\tsymbolic links that resolve to directories. Default is :data:`None` for\n\t\t:data:`True`.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns an :class:`~collections.abc.Iterator` yielding each file check\n\t\tresult (:class:`~pathspec.util.CheckResult`).\n\t\t\"\"\"\n\t\tfiles = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)\n\t\tyield from self.check_files(files)\n\n\t@classmethod\n\tdef from_lines(\n\t\tcls: Type[Self],\n\t\tpattern_factory: Union[str, Callable[[AnyStr], Pattern]],\n\t\tlines: Iterable[AnyStr],\n\t) -> Self:\n\t\t\"\"\"\n\t\tCompiles the pattern lines.\n\n\t\t*pattern_factory* can be either the name of a registered pattern factory\n\t\t(:class:`str`), or a :class:`~collections.abc.Callable` used to compile\n\t\tpatterns. It must accept an uncompiled pattern (:class:`str`) and return the\n\t\tcompiled pattern (:class:`.Pattern`).\n\n\t\t*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern\n\t\t(:class:`str`). 
This simply has to yield each line so that it can be a\n\t\t:class:`io.TextIOBase` (e.g., from :func:`open` or :class:`io.StringIO`) or\n\t\tthe result from :meth:`str.splitlines`.\n\n\t\tReturns the :class:`PathSpec` instance.\n\t\t\"\"\"\n\t\tif isinstance(pattern_factory, str):\n\t\t\tpattern_factory = util.lookup_pattern(pattern_factory)\n\n\t\tif not callable(pattern_factory):\n\t\t\traise TypeError(f\"pattern_factory:{pattern_factory!r} is not callable.\")\n\n\t\tif not _is_iterable(lines):\n\t\t\traise TypeError(f\"lines:{lines!r} is not an iterable.\")\n\n\t\tpatterns = [pattern_factory(line) for line in lines if line]\n\t\treturn cls(patterns)\n\n\tdef match_entries(\n\t\tself,\n\t\tentries: Iterable[TreeEntry],\n\t\tseparators: Optional[Collection[str]] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[TreeEntry]:\n\t\t\"\"\"\n\t\tMatches the entries to this path-spec.\n\n\t\t*entries* (:class:`~collections.abc.Iterable` of :class:`~pathspec.util.TreeEntry`)\n\t\tcontains the entries to be matched against :attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched entries (:class:`~collections.abc.Iterator` of\n\t\t:class:`~pathspec.util.TreeEntry`).\n\t\t\"\"\"\n\t\tif not _is_iterable(entries):\n\t\t\traise TypeError(f\"entries:{entries!r} is not an iterable.\")\n\n\t\tuse_patterns = _filter_check_patterns(self.patterns)\n\t\tfor entry in entries:\n\t\t\tnorm_file = normalize_file(entry.path, separators)\n\t\t\tinclude, _index = self._match_file(use_patterns, norm_file)\n\n\t\t\tif negate:\n\t\t\t\tinclude = not include\n\n\t\t\tif include:\n\t\t\t\tyield entry\n\n\t_match_file = staticmethod(util.check_match_file)\n\t\"\"\"\n\tMatch files using the `check_match_file()` utility function. Subclasses may\n\toverride this method as an instance method. It does not have to be a static\n\tmethod. The signature for this method is subject to change.\n\t\"\"\"\n\n\tdef match_file(\n\t\tself,\n\t\tfile: StrPath,\n\t\tseparators: Optional[Collection[str]] = None,\n\t) -> bool:\n\t\t\"\"\"\n\t\tMatches the file to this path-spec.\n\n\t\t*file* (:class:`str` or :class:`os.PathLike`) is the file path to be\n\t\tmatched against :attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`)\n\t\toptionally contains the path separators to normalize. 
See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\tReturns :data:`True` if *file* matched; otherwise, :data:`False`.\n\t\t\"\"\"\n\t\tnorm_file = normalize_file(file, separators)\n\t\tinclude, _index = self._match_file(enumerate(self.patterns), norm_file)\n\t\treturn bool(include)\n\n\tdef match_files(\n\t\tself,\n\t\tfiles: Iterable[StrPath],\n\t\tseparators: Optional[Collection[str]] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[StrPath]:\n\t\t\"\"\"\n\t\tMatches the files to this path-spec.\n\n\t\t*files* (:class:`~collections.abc.Iterable` of :class:`str` or\n\t\t:class:`os.PathLike`) contains the file paths to be matched against\n\t\t:attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterator` of\n\t\t:class:`str` or :class:`os.PathLike`).\n\t\t\"\"\"\n\t\tif not _is_iterable(files):\n\t\t\traise TypeError(f\"files:{files!r} is not an iterable.\")\n\n\t\tuse_patterns = _filter_check_patterns(self.patterns)\n\t\tfor orig_file in files:\n\t\t\tnorm_file = normalize_file(orig_file, separators)\n\t\t\tinclude, _index = self._match_file(use_patterns, norm_file)\n\n\t\t\tif negate:\n\t\t\t\tinclude = not include\n\n\t\t\tif include:\n\t\t\t\tyield orig_file\n\n\tdef match_tree_entries(\n\t\tself,\n\t\troot: StrPath,\n\t\ton_error: Optional[Callable[[OSError], None]] = None,\n\t\tfollow_links: Optional[bool] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[TreeEntry]:\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and matches them to this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str` or :class:`os.PathLike`) is the root directory to\n\t\tsearch.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally\n\t\tis the error handler for file-system exceptions. It will be called with the\n\t\texception (:exc:`OSError`). Reraise the exception to abort the walk. Default\n\t\tis :data:`None` to ignore file-system exceptions.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk\n\t\tsymbolic links that resolve to directories. Default is :data:`None` for\n\t\t:data:`True`.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. 
Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterator` of\n\t\t:class:`.TreeEntry`).\n\t\t\"\"\"\n\t\tentries = util.iter_tree_entries(root, on_error=on_error, follow_links=follow_links)\n\t\tyield from self.match_entries(entries, negate=negate)\n\n\tdef match_tree_files(\n\t\tself,\n\t\troot: StrPath,\n\t\ton_error: Optional[Callable[[OSError], None]] = None,\n\t\tfollow_links: Optional[bool] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[str]:\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and matches them to this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str` or :class:`os.PathLike`) is the root directory to\n\t\tsearch for files.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally\n\t\tis the error handler for file-system exceptions. It will be called with the\n\t\texception (:exc:`OSError`). Reraise the exception to abort the walk. Default\n\t\tis :data:`None` to ignore file-system exceptions.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk\n\t\tsymbolic links that resolve to directories. Default is :data:`None` for\n\t\t:data:`True`.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterable` of\n\t\t:class:`str`).\n\t\t\"\"\"\n\t\tfiles = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)\n\t\tyield from self.match_files(files, negate=negate)\n\n\t# Alias `match_tree_files()` as `match_tree()` for backward compatibility\n\t# before v0.3.2.\n\tmatch_tree = match_tree_files"
},
{
"identifier": "Pattern",
"path": "third-party/pathspec-0.12.1/pathspec/pattern.py",
"snippet": "class Pattern(object):\n\t\"\"\"\n\tThe :class:`Pattern` class is the abstract definition of a pattern.\n\t\"\"\"\n\n\t# Make the class dict-less.\n\t__slots__ = (\n\t\t'include',\n\t)\n\n\tdef __init__(self, include: Optional[bool]) -> None:\n\t\t\"\"\"\n\t\tInitializes the :class:`Pattern` instance.\n\n\t\t*include* (:class:`bool` or :data:`None`) is whether the matched files\n\t\tshould be included (:data:`True`), excluded (:data:`False`), or is a\n\t\tnull-operation (:data:`None`).\n\t\t\"\"\"\n\n\t\tself.include = include\n\t\t\"\"\"\n\t\t*include* (:class:`bool` or :data:`None`) is whether the matched files\n\t\tshould be included (:data:`True`), excluded (:data:`False`), or is a\n\t\tnull-operation (:data:`None`).\n\t\t\"\"\"\n\n\tdef match(self, files: Iterable[str]) -> Iterator[str]:\n\t\t\"\"\"\n\t\tDEPRECATED: This method is no longer used and has been replaced by\n\t\t:meth:`.match_file`. Use the :meth:`.match_file` method with a loop for\n\t\tsimilar results.\n\n\t\tMatches this pattern against the specified files.\n\n\t\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each\n\t\tfile relative to the root directory (e.g., ``\"relative/path/to/file\"``).\n\n\t\tReturns an :class:`~collections.abc.Iterable` yielding each matched file\n\t\tpath (:class:`str`).\n\t\t\"\"\"\n\t\twarnings.warn((\n\t\t\t\"{cls.__module__}.{cls.__qualname__}.match() is deprecated. Use \"\n\t\t\t\"{cls.__module__}.{cls.__qualname__}.match_file() with a loop for \"\n\t\t\t\"similar results.\"\n\t\t).format(cls=self.__class__), DeprecationWarning, stacklevel=2)\n\n\t\tfor file in files:\n\t\t\tif self.match_file(file) is not None:\n\t\t\t\tyield file\n\n\tdef match_file(self, file: str) -> Optional[Any]:\n\t\t\"\"\"\n\t\tMatches this pattern against the specified file.\n\n\t\t*file* (:class:`str`) is the normalized file path to match against.\n\n\t\tReturns the match result if *file* matched; otherwise, :data:`None`.\n\t\t\"\"\"\n\t\traise NotImplementedError((\n\t\t\t\"{cls.__module__}.{cls.__qualname__} must override match_file().\"\n\t\t).format(cls=self.__class__))"
},
{
"identifier": "GitWildMatchPattern",
"path": "third-party/pathspec-0.12.1/pathspec/patterns/gitwildmatch.py",
"snippet": "class GitWildMatchPattern(RegexPattern):\n\t\"\"\"\n\tThe :class:`GitWildMatchPattern` class represents a compiled Git wildmatch\n\tpattern.\n\t\"\"\"\n\n\t# Keep the dict-less class hierarchy.\n\t__slots__ = ()\n\n\t@classmethod\n\tdef pattern_to_regex(\n\t\tcls,\n\t\tpattern: AnyStr,\n\t) -> Tuple[Optional[AnyStr], Optional[bool]]:\n\t\t\"\"\"\n\t\tConvert the pattern into a regular expression.\n\n\t\t*pattern* (:class:`str` or :class:`bytes`) is the pattern to convert into a\n\t\tregular expression.\n\n\t\tReturns the uncompiled regular expression (:class:`str`, :class:`bytes`, or\n\t\t:data:`None`); and whether matched files should be included (:data:`True`),\n\t\texcluded (:data:`False`), or if it is a null-operation (:data:`None`).\n\t\t\"\"\"\n\t\tif isinstance(pattern, str):\n\t\t\treturn_type = str\n\t\telif isinstance(pattern, bytes):\n\t\t\treturn_type = bytes\n\t\t\tpattern = pattern.decode(_BYTES_ENCODING)\n\t\telse:\n\t\t\traise TypeError(f\"pattern:{pattern!r} is not a unicode or byte string.\")\n\n\t\toriginal_pattern = pattern\n\n\t\tif pattern.endswith('\\\\ '):\n\t\t\t# EDGE CASE: Spaces can be escaped with backslash. If a pattern that ends\n\t\t\t# with backslash followed by a space, only strip from left.\n\t\t\tpattern = pattern.lstrip()\n\t\telse:\n\t\t\tpattern = pattern.strip()\n\n\t\tif pattern.startswith('#'):\n\t\t\t# A pattern starting with a hash ('#') serves as a comment (neither\n\t\t\t# includes nor excludes files). Escape the hash with a back-slash to match\n\t\t\t# a literal hash (i.e., '\\#').\n\t\t\tregex = None\n\t\t\tinclude = None\n\n\t\telif pattern == '/':\n\t\t\t# EDGE CASE: According to `git check-ignore` (v2.4.1), a single '/' does\n\t\t\t# not match any file.\n\t\t\tregex = None\n\t\t\tinclude = None\n\n\t\telif pattern:\n\t\t\tif pattern.startswith('!'):\n\t\t\t\t# A pattern starting with an exclamation mark ('!') negates the pattern\n\t\t\t\t# (exclude instead of include). Escape the exclamation mark with a\n\t\t\t\t# back-slash to match a literal exclamation mark (i.e., '\\!').\n\t\t\t\tinclude = False\n\t\t\t\t# Remove leading exclamation mark.\n\t\t\t\tpattern = pattern[1:]\n\t\t\telse:\n\t\t\t\tinclude = True\n\n\t\t\t# Allow a regex override for edge cases that cannot be handled through\n\t\t\t# normalization.\n\t\t\toverride_regex = None\n\n\t\t\t# Split pattern into segments.\n\t\t\tpattern_segs = pattern.split('/')\n\n\t\t\t# Check whether the pattern is specifically a directory pattern before\n\t\t\t# normalization.\n\t\t\tis_dir_pattern = not pattern_segs[-1]\n\n\t\t\t# Normalize pattern to make processing easier.\n\n\t\t\t# EDGE CASE: Deal with duplicate double-asterisk sequences. Collapse each\n\t\t\t# sequence down to one double-asterisk. Iterate over the segments in\n\t\t\t# reverse and remove the duplicate double asterisks as we go.\n\t\t\tfor i in range(len(pattern_segs) - 1, 0, -1):\n\t\t\t\tprev = pattern_segs[i-1]\n\t\t\t\tseg = pattern_segs[i]\n\t\t\t\tif prev == '**' and seg == '**':\n\t\t\t\t\tdel pattern_segs[i]\n\n\t\t\tif len(pattern_segs) == 2 and pattern_segs[0] == '**' and not pattern_segs[1]:\n\t\t\t\t# EDGE CASE: The '**/' pattern should match everything except individual\n\t\t\t\t# files in the root directory. This case cannot be adequately handled\n\t\t\t\t# through normalization. 
Use the override.\n\t\t\t\toverride_regex = f'^.+(?P<{_DIR_MARK}>/).*$'\n\n\t\t\tif not pattern_segs[0]:\n\t\t\t\t# A pattern beginning with a slash ('/') will only match paths directly\n\t\t\t\t# on the root directory instead of any descendant paths. So, remove\n\t\t\t\t# empty first segment to make pattern relative to root.\n\t\t\t\tdel pattern_segs[0]\n\n\t\t\telif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):\n\t\t\t\t# A single pattern without a beginning slash ('/') will match any\n\t\t\t\t# descendant path. This is equivalent to \"**/{pattern}\". So, prepend\n\t\t\t\t# with double-asterisks to make pattern relative to root.\n\t\t\t\t# - EDGE CASE: This also holds for a single pattern with a trailing\n\t\t\t\t# slash (e.g. dir/).\n\t\t\t\tif pattern_segs[0] != '**':\n\t\t\t\t\tpattern_segs.insert(0, '**')\n\n\t\t\telse:\n\t\t\t\t# EDGE CASE: A pattern without a beginning slash ('/') but contains at\n\t\t\t\t# least one prepended directory (e.g. \"dir/{pattern}\") should not match\n\t\t\t\t# \"**/dir/{pattern}\", according to `git check-ignore` (v2.4.1).\n\t\t\t\tpass\n\n\t\t\tif not pattern_segs:\n\t\t\t\t# After resolving the edge cases, we end up with no pattern at all. This\n\t\t\t\t# must be because the pattern is invalid.\n\t\t\t\traise GitWildMatchPatternError(f\"Invalid git pattern: {original_pattern!r}\")\n\n\t\t\tif not pattern_segs[-1] and len(pattern_segs) > 1:\n\t\t\t\t# A pattern ending with a slash ('/') will match all descendant paths if\n\t\t\t\t# it is a directory but not if it is a regular file. This is equivalent\n\t\t\t\t# to \"{pattern}/**\". So, set last segment to a double-asterisk to\n\t\t\t\t# include all descendants.\n\t\t\t\tpattern_segs[-1] = '**'\n\n\t\t\tif override_regex is None:\n\t\t\t\t# Build regular expression from pattern.\n\t\t\t\toutput = ['^']\n\t\t\t\tneed_slash = False\n\t\t\t\tend = len(pattern_segs) - 1\n\t\t\t\tfor i, seg in enumerate(pattern_segs):\n\t\t\t\t\tif seg == '**':\n\t\t\t\t\t\tif i == 0 and i == end:\n\t\t\t\t\t\t\t# A pattern consisting solely of double-asterisks ('**') will\n\t\t\t\t\t\t\t# match every path.\n\t\t\t\t\t\t\toutput.append(f'[^/]+(?:/.*)?')\n\n\t\t\t\t\t\telif i == 0:\n\t\t\t\t\t\t\t# A normalized pattern beginning with double-asterisks\n\t\t\t\t\t\t\t# ('**') will match any leading path segments.\n\t\t\t\t\t\t\toutput.append('(?:.+/)?')\n\t\t\t\t\t\t\tneed_slash = False\n\n\t\t\t\t\t\telif i == end:\n\t\t\t\t\t\t\t# A normalized pattern ending with double-asterisks ('**') will\n\t\t\t\t\t\t\t# match any trailing path segments.\n\t\t\t\t\t\t\tif is_dir_pattern:\n\t\t\t\t\t\t\t\toutput.append(f'(?P<{_DIR_MARK}>/).*')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutput.append(f'/.*')\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# A pattern with inner double-asterisks ('**') will match multiple\n\t\t\t\t\t\t\t# (or zero) inner path segments.\n\t\t\t\t\t\t\toutput.append('(?:/.+)?')\n\t\t\t\t\t\t\tneed_slash = True\n\n\t\t\t\t\telif seg == '*':\n\t\t\t\t\t\t# Match single path segment.\n\t\t\t\t\t\tif need_slash:\n\t\t\t\t\t\t\toutput.append('/')\n\n\t\t\t\t\t\toutput.append('[^/]+')\n\n\t\t\t\t\t\tif i == end:\n\t\t\t\t\t\t\t# A pattern ending without a slash ('/') will match a file or a\n\t\t\t\t\t\t\t# directory (with paths underneath it). 
E.g., \"foo\" matches \"foo\",\n\t\t\t\t\t\t\t# \"foo/bar\", \"foo/bar/baz\", etc.\n\t\t\t\t\t\t\toutput.append(f'(?:(?P<{_DIR_MARK}>/).*)?')\n\n\t\t\t\t\t\tneed_slash = True\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Match segment glob pattern.\n\t\t\t\t\t\tif need_slash:\n\t\t\t\t\t\t\toutput.append('/')\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\toutput.append(cls._translate_segment_glob(seg))\n\t\t\t\t\t\texcept ValueError as e:\n\t\t\t\t\t\t\traise GitWildMatchPatternError(f\"Invalid git pattern: {original_pattern!r}\") from e\n\n\t\t\t\t\t\tif i == end:\n\t\t\t\t\t\t\t# A pattern ending without a slash ('/') will match a file or a\n\t\t\t\t\t\t\t# directory (with paths underneath it). E.g., \"foo\" matches \"foo\",\n\t\t\t\t\t\t\t# \"foo/bar\", \"foo/bar/baz\", etc.\n\t\t\t\t\t\t\toutput.append(f'(?:(?P<{_DIR_MARK}>/).*)?')\n\n\t\t\t\t\t\tneed_slash = True\n\n\t\t\t\toutput.append('$')\n\t\t\t\tregex = ''.join(output)\n\n\t\t\telse:\n\t\t\t\t# Use regex override.\n\t\t\t\tregex = override_regex\n\n\t\telse:\n\t\t\t# A blank pattern is a null-operation (neither includes nor excludes\n\t\t\t# files).\n\t\t\tregex = None\n\t\t\tinclude = None\n\n\t\tif regex is not None and return_type is bytes:\n\t\t\tregex = regex.encode(_BYTES_ENCODING)\n\n\t\treturn regex, include\n\n\t@staticmethod\n\tdef _translate_segment_glob(pattern: str) -> str:\n\t\t\"\"\"\n\t\tTranslates the glob pattern to a regular expression. This is used in the\n\t\tconstructor to translate a path segment glob pattern to its corresponding\n\t\tregular expression.\n\n\t\t*pattern* (:class:`str`) is the glob pattern.\n\n\t\tReturns the regular expression (:class:`str`).\n\t\t\"\"\"\n\t\t# NOTE: This is derived from `fnmatch.translate()` and is similar to the\n\t\t# POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.\n\n\t\tescape = False\n\t\tregex = ''\n\t\ti, end = 0, len(pattern)\n\t\twhile i < end:\n\t\t\t# Get next character.\n\t\t\tchar = pattern[i]\n\t\t\ti += 1\n\n\t\t\tif escape:\n\t\t\t\t# Escape the character.\n\t\t\t\tescape = False\n\t\t\t\tregex += re.escape(char)\n\n\t\t\telif char == '\\\\':\n\t\t\t\t# Escape character, escape next character.\n\t\t\t\tescape = True\n\n\t\t\telif char == '*':\n\t\t\t\t# Multi-character wildcard. Match any string (except slashes), including\n\t\t\t\t# an empty string.\n\t\t\t\tregex += '[^/]*'\n\n\t\t\telif char == '?':\n\t\t\t\t# Single-character wildcard. Match any single character (except a\n\t\t\t\t# slash).\n\t\t\t\tregex += '[^/]'\n\n\t\t\telif char == '[':\n\t\t\t\t# Bracket expression wildcard. Except for the beginning exclamation\n\t\t\t\t# mark, the whole bracket expression can be used directly as regex, but\n\t\t\t\t# we have to find where the expression ends.\n\t\t\t\t# - \"[][!]\" matches ']', '[' and '!'.\n\t\t\t\t# - \"[]-]\" matches ']' and '-'.\n\t\t\t\t# - \"[!]a-]\" matches any character except ']', 'a' and '-'.\n\t\t\t\tj = i\n\n\t\t\t\t# Pass bracket expression negation.\n\t\t\t\tif j < end and (pattern[j] == '!' or pattern[j] == '^'):\n\t\t\t\t\tj += 1\n\n\t\t\t\t# Pass first closing bracket if it is at the beginning of the\n\t\t\t\t# expression.\n\t\t\t\tif j < end and pattern[j] == ']':\n\t\t\t\t\tj += 1\n\n\t\t\t\t# Find closing bracket. Stop once we reach the end or find it.\n\t\t\t\twhile j < end and pattern[j] != ']':\n\t\t\t\t\tj += 1\n\n\t\t\t\tif j < end:\n\t\t\t\t\t# Found end of bracket expression. 
Increment j to be one past the\n\t\t\t\t\t# closing bracket:\n\t\t\t\t\t#\n\t\t\t\t\t# [...]\n\t\t\t\t\t# ^ ^\n\t\t\t\t\t# i j\n\t\t\t\t\t#\n\t\t\t\t\tj += 1\n\t\t\t\t\texpr = '['\n\n\t\t\t\t\tif pattern[i] == '!':\n\t\t\t\t\t\t# Bracket expression needs to be negated.\n\t\t\t\t\t\texpr += '^'\n\t\t\t\t\t\ti += 1\n\t\t\t\t\telif pattern[i] == '^':\n\t\t\t\t\t\t# POSIX declares that the regex bracket expression negation \"[^...]\"\n\t\t\t\t\t\t# is undefined in a glob pattern. Python's `fnmatch.translate()`\n\t\t\t\t\t\t# escapes the caret ('^') as a literal. Git supports the using a\n\t\t\t\t\t\t# caret for negation. Maintain consistency with Git because that is\n\t\t\t\t\t\t# the expected behavior.\n\t\t\t\t\t\texpr += '^'\n\t\t\t\t\t\ti += 1\n\n\t\t\t\t\t# Build regex bracket expression. Escape slashes so they are treated\n\t\t\t\t\t# as literal slashes by regex as defined by POSIX.\n\t\t\t\t\texpr += pattern[i:j].replace('\\\\', '\\\\\\\\')\n\n\t\t\t\t\t# Add regex bracket expression to regex result.\n\t\t\t\t\tregex += expr\n\n\t\t\t\t\t# Set i to one past the closing bracket.\n\t\t\t\t\ti = j\n\n\t\t\t\telse:\n\t\t\t\t\t# Failed to find closing bracket, treat opening bracket as a bracket\n\t\t\t\t\t# literal instead of as an expression.\n\t\t\t\t\tregex += '\\\\['\n\n\t\t\telse:\n\t\t\t\t# Regular character, escape it for regex.\n\t\t\t\tregex += re.escape(char)\n\n\t\tif escape:\n\t\t\traise ValueError(f\"Escape character found with no next character to escape: {pattern!r}\")\n\n\t\treturn regex\n\n\t@staticmethod\n\tdef escape(s: AnyStr) -> AnyStr:\n\t\t\"\"\"\n\t\tEscape special characters in the given string.\n\n\t\t*s* (:class:`str` or :class:`bytes`) a filename or a string that you want to\n\t\tescape, usually before adding it to a \".gitignore\".\n\n\t\tReturns the escaped string (:class:`str` or :class:`bytes`).\n\t\t\"\"\"\n\t\tif isinstance(s, str):\n\t\t\treturn_type = str\n\t\t\tstring = s\n\t\telif isinstance(s, bytes):\n\t\t\treturn_type = bytes\n\t\t\tstring = s.decode(_BYTES_ENCODING)\n\t\telse:\n\t\t\traise TypeError(f\"s:{s!r} is not a unicode or byte string.\")\n\n\t\t# Reference: https://git-scm.com/docs/gitignore#_pattern_format\n\t\tmeta_characters = r\"[]!*#?\"\n\n\t\tout_string = \"\".join(\"\\\\\" + x if x in meta_characters else x for x in string)\n\n\t\tif return_type is bytes:\n\t\t\treturn out_string.encode(_BYTES_ENCODING)\n\t\telse:\n\t\t\treturn out_string"
},
{
"identifier": "_DIR_MARK",
"path": "third-party/pathspec-0.12.1/pathspec/patterns/gitwildmatch.py",
"snippet": "_DIR_MARK = 'ps_d'"
},
{
"identifier": "_is_iterable",
"path": "third-party/pathspec-0.12.1/pathspec/util.py",
"snippet": "def _is_iterable(value: Any) -> bool:\n\t\"\"\"\n\tCheck whether the value is an iterable (excludes strings).\n\n\t*value* is the value to check,\n\n\tReturns whether *value* is a iterable (:class:`bool`).\n\t\"\"\"\n\treturn isinstance(value, IterableType) and not isinstance(value, (str, bytes))"
}
] | from typing import (
AnyStr,
Callable, # Replaced by `collections.abc.Callable` in 3.9.
Iterable, # Replaced by `collections.abc.Iterable` in 3.9.
Optional, # Replaced by `X | None` in 3.10.
Tuple, # Replaced by `tuple` in 3.9.
Type, # Replaced by `type` in 3.9.
TypeVar,
Union, # Replaced by `X | Y` in 3.10.
cast,
overload)
from .pathspec import (
PathSpec)
from .pattern import (
Pattern)
from .patterns.gitwildmatch import (
GitWildMatchPattern,
_DIR_MARK)
from .util import (
_is_iterable) | 8,772 | """
This module provides :class:`.GitIgnoreSpec` which replicates
*.gitignore* behavior.
"""
Self = TypeVar("Self", bound="GitIgnoreSpec")
"""
:class:`GitIgnoreSpec` self type hint to support Python v<3.11 using PEP
673 recommendation.
"""
class GitIgnoreSpec(PathSpec):
    """
    The :class:`GitIgnoreSpec` class extends :class:`pathspec.pathspec.PathSpec` to
    replicate *.gitignore* behavior.
    """

    def __eq__(self, other: object) -> bool:
        """
        Tests the equality of this gitignore-spec with *other* (:class:`GitIgnoreSpec`)
        by comparing their :attr:`~pathspec.pattern.Pattern`
        attributes. A non-:class:`GitIgnoreSpec` will not compare equal.
        """
        if isinstance(other, GitIgnoreSpec):
            return super().__eq__(other)
        elif isinstance(other, PathSpec):
            return False
        else:
            return NotImplemented

    # Support reversed order of arguments from PathSpec.
    @overload
    @classmethod
    def from_lines(
        cls: Type[Self],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern]],
        lines: Iterable[AnyStr],
    ) -> Self:
        ...

    @overload
    @classmethod
    def from_lines(
        cls: Type[Self],
        lines: Iterable[AnyStr],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None,
    ) -> Self:
        ...

    @classmethod
    def from_lines(
        cls: Type[Self],
        lines: Iterable[AnyStr],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None,
    ) -> Self:
        """
        Compiles the pattern lines.

        *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
        pattern (:class:`str`). This simply has to yield each line so it can
        be a :class:`io.TextIOBase` (e.g., from :func:`open` or
        :class:`io.StringIO`) or the result from :meth:`str.splitlines`.

        *pattern_factory* can be :data:`None`, the name of a registered
        pattern factory (:class:`str`), or a :class:`~collections.abc.Callable`
        used to compile patterns. The callable must accept an uncompiled
        pattern (:class:`str`) and return the compiled pattern
        (:class:`pathspec.pattern.Pattern`).

        Default is :data:`None` for :class:`.GitWildMatchPattern`).

        Returns the :class:`GitIgnoreSpec` instance.
        """
        if pattern_factory is None:
            pattern_factory = GitWildMatchPattern
| """
This module provides :class:`.GitIgnoreSpec` which replicates
*.gitignore* behavior.
"""
Self = TypeVar("Self", bound="GitIgnoreSpec")
"""
:class:`GitIgnoreSpec` self type hint to support Python v<3.11 using PEP
673 recommendation.
"""
class GitIgnoreSpec(PathSpec):
    """
    The :class:`GitIgnoreSpec` class extends :class:`pathspec.pathspec.PathSpec` to
    replicate *.gitignore* behavior.
    """

    def __eq__(self, other: object) -> bool:
        """
        Tests the equality of this gitignore-spec with *other* (:class:`GitIgnoreSpec`)
        by comparing their :attr:`~pathspec.pattern.Pattern`
        attributes. A non-:class:`GitIgnoreSpec` will not compare equal.
        """
        if isinstance(other, GitIgnoreSpec):
            return super().__eq__(other)
        elif isinstance(other, PathSpec):
            return False
        else:
            return NotImplemented

    # Support reversed order of arguments from PathSpec.
    @overload
    @classmethod
    def from_lines(
        cls: Type[Self],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern]],
        lines: Iterable[AnyStr],
    ) -> Self:
        ...

    @overload
    @classmethod
    def from_lines(
        cls: Type[Self],
        lines: Iterable[AnyStr],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None,
    ) -> Self:
        ...

    @classmethod
    def from_lines(
        cls: Type[Self],
        lines: Iterable[AnyStr],
        pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None,
    ) -> Self:
        """
        Compiles the pattern lines.

        *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
        pattern (:class:`str`). This simply has to yield each line so it can
        be a :class:`io.TextIOBase` (e.g., from :func:`open` or
        :class:`io.StringIO`) or the result from :meth:`str.splitlines`.

        *pattern_factory* can be :data:`None`, the name of a registered
        pattern factory (:class:`str`), or a :class:`~collections.abc.Callable`
        used to compile patterns. The callable must accept an uncompiled
        pattern (:class:`str`) and return the compiled pattern
        (:class:`pathspec.pattern.Pattern`).

        Default is :data:`None` for :class:`.GitWildMatchPattern`).

        Returns the :class:`GitIgnoreSpec` instance.
        """
        if pattern_factory is None:
            pattern_factory = GitWildMatchPattern
| elif (isinstance(lines, (str, bytes)) or callable(lines)) and _is_iterable(pattern_factory): | 4 | 2023-12-14 07:43:03+00:00 | 12k
pan-x-c/EE-LLM | tests/unit_tests/transformer/test_spec_customization.py | [
{
"identifier": "get_bias_dropout_add",
"path": "megatron/core/fusions/fused_bias_dropout.py",
"snippet": "def get_bias_dropout_add(training, fused):\n if fused:\n # jit scripting for a nn.module (with dropout) is not\n # triggering the fusion kernel. For now, we use two\n # different nn.functional routines to account for varying\n # dropout semantics during training and inference phases.\n if training:\n return bias_dropout_add_fused_train\n else:\n return bias_dropout_add_fused_inference\n else:\n return bias_dropout_add_unfused(training)"
},
{
"identifier": "model_parallel_cuda_manual_seed",
"path": "megatron/core/tensor_parallel/random.py",
"snippet": "def model_parallel_cuda_manual_seed(seed):\n \"\"\"Initialize model parallel cuda seed.\n\n This function should be called after the model parallel is\n initialized. Also, no torch.cuda.manual_seed should be called\n after this function. Basically, this is replacement for that\n function.\n Two set of RNG states are tracked:\n default state: This is for data parallelism and is the same among a\n set of model parallel GPUs but different across\n different model paralle groups. This is used for\n example for dropout in the non-tensor-model-parallel regions.\n tensor-model-parallel state: This state is different among a set of model\n parallel GPUs, but the same across data parallel\n groups. This is used for example for dropout in\n model parallel regions.\n \"\"\"\n # 2718 is just for fun and any POSITIVE value will work.\n offset = seed + 2718\n tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()\n # Data parallel gets the original seed.\n data_parallel_seed = seed\n\n _CUDA_RNG_STATE_TRACKER.reset()\n # Set the default state.\n torch.cuda.manual_seed(data_parallel_seed)\n # and model parallel state.\n _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, tensor_model_parallel_seed)\n\n expert_parallel_seed = (\n seed + 100 * get_expert_model_parallel_rank() + get_tensor_model_parallel_rank()\n )\n _CUDA_RNG_STATE_TRACKER.add(_EXPERT_PARALLEL_RNG_TRACKER_NAME, expert_parallel_seed)"
},
{
"identifier": "SelfAttention",
"path": "megatron/core/transformer/attention.py",
"snippet": "class SelfAttention(Attention):\n \"\"\"Self-attention layer class\n\n Self-attention layer takes input with size [s, b, h]\n and returns output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: SelfAttentionSubmodules,\n layer_number: int = 1,\n attn_mask_type=AttnMaskType.padding,\n **kwargs,\n ):\n super().__init__(\n config=config,\n submodules=submodules,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type,\n **kwargs,\n )\n\n self.linear_qkv = build_module(\n submodules.linear_qkv,\n self.config.hidden_size,\n self.query_projection_size + 2 * self.kv_projection_size,\n config=self.config,\n init_method=self.config.init_method,\n bias=self.config.add_bias_linear,\n skip_bias_add=False,\n )\n\n def get_query_key_value_tensors(self, hidden_states, key_value_states=None):\n \"\"\"\n Derives `query`, `key` and `value` tensors from `hidden_states`.\n \"\"\"\n # Attention heads [sq, b, h] --> [sq, b, ng * (np/ng + 2) * hn)]\n mixed_qkv, _ = self.linear_qkv(hidden_states)\n\n # [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]\n new_tensor_shape = mixed_qkv.size()[:-1] + (\n self.num_query_groups_per_partition,\n (\n (self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)\n * self.hidden_size_per_attention_head\n ),\n )\n mixed_qkv = mixed_qkv.view(*new_tensor_shape)\n\n # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn]\n (query, key, value) = torch.split(\n mixed_qkv,\n [\n (\n self.num_attention_heads_per_partition\n // self.num_query_groups_per_partition\n * self.hidden_size_per_attention_head\n ),\n self.hidden_size_per_attention_head,\n self.hidden_size_per_attention_head,\n ],\n dim=3,\n )\n # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]\n query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head)\n\n return query, key, value"
},
{
"identifier": "SelfAttentionSubmodules",
"path": "megatron/core/transformer/attention.py",
"snippet": "class SelfAttentionSubmodules:\n linear_qkv: Union[ModuleSpec, type] = None\n dot_product_attention: Union[ModuleSpec, type] = None\n linear_proj: Union[ModuleSpec, type] = None"
},
{
"identifier": "TEDotProductAttention",
"path": "megatron/core/transformer/custom_layers/transformer_engine.py",
"snippet": "class TEDotProductAttention(te.pytorch.DotProductAttention):\n \"\"\"\n Wrapper for the Transformer-Engine's `DotProductAttention` layer that also\n has \"flash attention\" enabled.\n\n Note that if Megatron's parallel_state has not been initialized yet, the\n tp_group and cp_group passed to TE will be None and must be set later\n via set_tensor_parallel_group() and set_context_parallel_group().\n \"\"\"\n\n cp_stream: torch.cuda.Stream = None\n\n def __init__(\n self,\n config: TransformerConfig,\n layer_number: int = 1,\n attn_mask_type: AttnMaskType = AttnMaskType.padding,\n **kwargs\n ):\n self.config = config\n\n # Only Transformer-Engine version > 0.13.0 supports context parallelism\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version > packaging.version.Version(\"0.13.0\"):\n if getattr(TEDotProductAttention, \"cp_stream\") is None:\n TEDotProductAttention.cp_stream = torch.cuda.Stream()\n kwargs[\"cp_group\"] = get_context_parallel_group(check_initialized=False)\n kwargs[\"cp_global_ranks\"] = get_context_parallel_global_ranks(check_initialized=False)\n kwargs[\"cp_stream\"] = TEDotProductAttention.cp_stream\n else:\n assert (\n self.config.context_parallel_size == 1\n ), \"Only Transformer-Engine version > 0.13.0 supports context parallelism\"\n\n super().__init__(\n num_attention_heads=self.config.num_attention_heads,\n kv_channels=self.config.kv_channels,\n attention_dropout=self.config.attention_dropout,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type.name,\n sequence_parallel=self.config.sequence_parallel,\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n **kwargs,\n )"
},
{
"identifier": "TELayerNormColumnParallelLinear",
"path": "megatron/core/transformer/custom_layers/transformer_engine.py",
"snippet": "class TELayerNormColumnParallelLinear(te.pytorch.LayerNormLinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `LayerNormLinear` layer that combines\n layernorm and linear layers\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n config: TransformerConfig,\n init_method: Callable,\n bias: bool,\n skip_bias_add: bool,\n **kwargs\n ):\n self.config = config\n # TE returns a zero length Tensor when bias=False and\n # return_bias=True, but we prefer None. So in that case we\n # tell TE to not return the bias, and return None\n # ourselves. This way our forward always returns two values\n # and we don't have to deal with the zero length Tensor.\n self.te_return_bias = skip_bias_add and bias\n\n # Only Transformer-Engine version >= 0.11.0 supports `RMSNorm`\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version >= packaging.version.Version(\"0.11.0\"):\n kwargs[\"normalization\"] = self.config.normalization\n\n super().__init__(\n in_features=input_size,\n out_features=output_size,\n bias=bias,\n sequence_parallel=self.config.sequence_parallel,\n fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n init_method=init_method,\n params_dtype=self.config.params_dtype,\n parallel_mode=\"column\",\n return_bias=self.te_return_bias,\n **_get_extra_te_kwargs(config),\n )\n\n def forward(self, x):\n out = super().forward(x)\n\n # TE only returns a tuple when return_bias is True, otherwise\n # it returns a single Tensor, we always want to return two\n # values regardless of the arguments.\n if self.te_return_bias:\n return out\n return out, None"
},
{
"identifier": "TENorm",
"path": "megatron/core/transformer/custom_layers/transformer_engine.py",
"snippet": "class TENorm:\n \"\"\"\n A conditional wrapper to initialize an instance of Transformer-Engine's\n `LayerNorm` or `RMSNorm` based on input\n \"\"\"\n\n def __new__(\n cls,\n config: TransformerConfig,\n hidden_size: int,\n eps: float = 1e-5,\n sequence_parallel: bool = False,\n normalization: str = \"LayerNorm\",\n **kwargs\n ):\n if normalization == \"LayerNorm\":\n instance = te.pytorch.LayerNorm(\n hidden_size=hidden_size,\n eps=eps,\n sequence_parallel=sequence_parallel,\n **_get_extra_te_kwargs(config),\n )\n elif normalization == \"RMSNorm\":\n assert hasattr(\n te.pytorch, \"RMSNorm\"\n ), \"Transformer-Engine >= v0.11 required to use this feature\"\n instance = te.pytorch.RMSNorm(\n hidden_size=hidden_size,\n eps=eps,\n sequence_parallel=sequence_parallel,\n **_get_extra_te_kwargs(config),\n )\n else:\n raise Exception('Only LayerNorm and RMSNorm are curently supported')\n\n return instance"
},
{
"identifier": "TERowParallelLinear",
"path": "megatron/core/transformer/custom_layers/transformer_engine.py",
"snippet": "class TERowParallelLinear(TELinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `Linear` layer but specialized similar\n to megatron's `RowParallelLinear` layer.\n \"\"\"\n\n def __init__(self, input_size: int, output_size: int, config: TransformerConfig, **kwargs):\n self.config = config\n super().__init__(\n input_size=input_size,\n output_size=output_size,\n config=self.config,\n parallel_mode=\"row\",\n **kwargs,\n )"
},
{
"identifier": "AttnMaskType",
"path": "megatron/core/transformer/enums.py",
"snippet": "class AttnMaskType(enum.Enum):\n padding = 1\n causal = 2"
},
{
"identifier": "IdentityFuncOp",
"path": "megatron/core/transformer/identity_op.py",
"snippet": "class IdentityFuncOp(IdentityOp):\n \"\"\"\n This is a placeholder for IdentityFuncOp(...)(x) -> IdentityOp(x) -> x.\n Such a func is handy for ops like `bias_dropout_fusion` which themselves\n return a function at runtime based on passed arguments\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, *args, **kwargs):\n return super().forward"
},
{
"identifier": "IdentityOp",
"path": "megatron/core/transformer/identity_op.py",
"snippet": "class IdentityOp(torch.nn.Module):\n \"\"\"\n This is a placeholder for IdentityOp(x) -> x\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "ModuleSpec",
"path": "megatron/core/transformer/spec_utils.py",
"snippet": "class ModuleSpec:\n \"\"\"This is a Module Specification dataclass.\n\n Specification defines the location of the module (to import dynamically)\n or the imported module itself. It also defines the params that need to be\n passed to initialize the module.\n\n Args:\n module (Union[Tuple, type]): A tuple describing the location of the\n module class e.g. `(module.location, ModuleClass)` or the imported\n module class itself e.g. `ModuleClass` (which is already imported\n using `from module.location import ModuleClass`).\n params (dict): A dictionary of params that need to be passed while init.\n\n \"\"\"\n\n module: Union[Tuple, type]\n params: dict = field(default_factory=lambda: {})\n submodules: type = None"
},
{
"identifier": "build_module",
"path": "megatron/core/transformer/spec_utils.py",
"snippet": "def build_module(spec_or_module: Union[ModuleSpec, type], *args, **kwargs):\n # If the passed `spec_or_module` is\n # a `Function`, then return it as it is\n # NOTE: to support an already initialized module add the following condition\n # `or isinstance(spec_or_module, torch.nn.Module)` to the following if check\n if isinstance(spec_or_module, types.FunctionType):\n return spec_or_module\n\n # If the passed `spec_or_module` is actually a spec (instance of\n # `ModuleSpec`) and it specifies a `Function` using its `module`\n # field, return the `Function` as it is\n if isinstance(spec_or_module, ModuleSpec) and isinstance(\n spec_or_module.module, types.FunctionType\n ):\n return spec_or_module.module\n\n # Check if a module class is provided as a spec or if the module path\n # itself is a class\n if isinstance(spec_or_module, type):\n module = spec_or_module\n elif hasattr(spec_or_module, \"module\") and isinstance(spec_or_module.module, type):\n module = spec_or_module.module\n else:\n # Otherwise, dynamically import the module from the module path\n module = import_module(spec_or_module.module)\n\n # If the imported module is actually a `Function` return it as it is\n if isinstance(module, types.FunctionType):\n return module\n\n # Finally return the initialized module with params from the spec as well\n # as those passed as **kwargs from the code\n\n # Add the `submodules` argument to the module init call if it exists in the\n # spec.\n if hasattr(spec_or_module, \"submodules\") and spec_or_module.submodules is not None:\n kwargs[\"submodules\"] = spec_or_module.submodules\n\n return module(\n *args, **spec_or_module.params if hasattr(spec_or_module, \"params\") else {}, **kwargs\n )"
},
{
"identifier": "import_module",
"path": "megatron/core/transformer/spec_utils.py",
"snippet": "def import_module(module_path: Tuple[str]):\n \"\"\"Import a named object from a module in the context of this function.\n\n TODO: make this importer module more robust, at least make sure there\n are no side effects of using this as is\n \"\"\"\n base_path, name = module_path\n try:\n module = __import__(base_path, globals(), locals(), [name])\n except ImportError as e:\n print(f\"couldn't import module due to {e}\")\n return None\n return vars(module)[name]"
},
{
"identifier": "TransformerConfig",
"path": "megatron/core/transformer/transformer_config.py",
"snippet": "class TransformerConfig(ModelParallelConfig):\n \"\"\"Configuration object for megatron-core transformers.\n\n Attributes:\n\n # model architecture\n num_layers (int): Number of transformer layers in a transformer block.\n hidden_size (int): Transformer hidden size.\n ffn_hidden_size (int): Transformer Feed-Forward Network hidden size.\n This is set to 4*hidden_size if not provided. Defaults to None.')\n num_attention_heads (int): Number of transformer attention heads.\n kv_channels (int): Projection weights dimension in multi-head attention.\n This is set to hidden_size // num_attention_heads if not provided.\n Defaults to None.\n num_query_groups (int): Number of query groups for group query attention. If None, normal attention is used.\n\n hidden_dropout (float): Dropout probability for transformer hidden state. Defaults to 0.1.\n attention_dropout (float): Post attention dropout probability. Defaults to 0.1.\n fp32_residual_connection (bool): If true, move residual connections to fp32.\n apply_residual_connection_post_layernorm (bool): If true, uses the original BERT residule connection ordering.\n Defaults to False.\n layernorm_epsilon (float): Layernorm epsilon. Defaults to 1e-5.\n\n layernorm_zero_centered_gamma (bool): if set to 'True', the LayerNorm is adjusted to center the gamma values\n around 0. This improves numerical stability. Defaults to False.\n\n add_bias_linear (bool): Include a bias term in all linear layers (QKV projections, after core attention, and two\n in MLP layer). Default is True.\n\n gated_linear_unit (bool): Use a gated linear unit for the first linear layer in the MLP. Defaults to False.\n\n activation_func (Callable): Activation function to use for the non-linearity in the MLP. Defaults to F.gelu.\n\n num_moe_experts (int): Number of experts to use for Mixture of Experts. \n When set, it replaces MLP with Switch MLP. Defaults to None (no MoE).\n\n # initialization\n init_method (Callable): Method to initialize weights. Note that bias is always set to\n zero. Should be a function that takes a single Tensor and\n initializes it. Defaults to\n megatron.core.utils.init_method_normal(init_method_std) which is\n torch.nn.init.normal_ with mean=0.0 and std=init_method_Std.\n\n output_layer_init_method (Callable): Method to initialize weights of the output layer of\n both attention and MLP blocks. Defaults to\n megatron.core.utils.scaled_init_method_normal(init_method_std)\n which is torch.nn.init.normal_ with mean=0.0 and\n std=init_method_std / math.sqrt(2.0 * num_layers).\n\n init_method_std (float): Standard deviation of the zero mean normal for the default\n initialization method, not used if init_method and\n output_layer_init_method are provided. Defaults to 0.02.\n\n # mixed-precision\n apply_query_key_layer_scaling (bool): If true, scale Q * K^T by 1 / layer-number. Defaults to True.\n attention_softmax_in_fp32 (bool): If true, run attention masking and softmax in fp32.\n This should be true if apply_query_key_layer_scaling is true.\n\n # fusion\n bias_gelu_fustion (bool): If true, fuses bias and gelu. 
Defaults to False.\n masked_softmax_fusion (bool): If true, uses softmax fusion.\n persist_layer_norm (bool): If true, uses the persistent fused layer norm kernel.\n This kernel only supports a fixed set of hidden sizes.\n Defaults to False.\n bias_dropout_fusion (bool): If true, uses bias dropout fusion.\n\n # activation recomputation\n\n recompute_granularity (str): megatron-core supports 'selective' activation checkpointing where only the memory\n intensive part of attention is checkpointed. These memory intensive activations\n are also less compute intensive which makes activation checkpointing more efficient\n for LLMs (20B+). See Reducing Activation Recomputation in Large Transformer\n Models: https://arxiv.org/abs/2205.05198 for more details. 'full' will checkpoint\n the entire transformer layer. Must be 'selective' or 'full'. 'selective' always uses all layers.\n Defaults to None.\n\n recompute_method (str): uniform will uniformly divide the total number of transformer layers in a transformer\n block and recompute the input activation of each divided chunk at the specified\n granularity. block will recompute the input activations for only a set number of\n transformer layers per pipeline stage. The rest of the layers in the pipeline stage\n will not have any activations recomputed. Must be 'uniform' or 'block'. Defaults to\n None.\n\n recompute_num_layers (int): When recompute_method is uniform, recompute_num_layers is the number of transformer\n layers in each uniformly divided recompute unit. When recompute_method is block,\n recompute_num_layers is the number of transformer layers to recompute within each\n pipeline stage. Must be None for 'selective' activation checkpointing. Defaults to None.\n\n distribute_saved_activations (bool): If true, distribute recomputed activations across the model parallel\n group. Defaults to None.\n\n # fp8 related (via Transformer Engine). For detailed info, refer the the Transformer Engine docs at\n # https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html\n\n fp8 (str): If set, enables the use of FP8 precision through Transformer Engine. There are 2 predefined choices: (1) 'e4m3'\n uniformly uses e4m3 for all FP8 tensors, (2) 'hybrid' uses e4m3 for all FP8 activation and weight tensors and\n e5m2 for all FP8 output activation gradient tensors. Defaults to None.\n\n fp8_margin (int): Margin for the scaling factor computation.\n\n fp8_interval (int): Controls how often the scaling factor is recomputed.\n\n fp8_amax_history_len (int): The length of the amax history window used for scaling factor computation.\n\n fp8_amax_compute_algo (str): Algorithm used for choosing the `amax` value for the scaling factor computation.\n There are 2 predefined choices: `max` chooses the largest `amax` in the history\n window, while `most_recent` always chooses the most recently seen value.\n\n fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision.\n Defaults to True.\n\n # Experimental\n normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily\n used by Transformer-Engine's layers like `LayerNormLinear`. 
Default value is `LayerNorm`.\n\n\n \"\"\"\n\n # model architecture\n num_layers: int = 0\n hidden_size: int = 0\n num_attention_heads: int = 0\n num_query_groups: int = None\n\n ffn_hidden_size: int = None\n kv_channels: int = None\n hidden_dropout: float = 0.1\n attention_dropout: float = 0.1\n fp32_residual_connection: bool = False\n # @jcasper should we keep this option?\n apply_residual_connection_post_layernorm: bool = False\n layernorm_epsilon: float = 1e-5\n layernorm_zero_centered_gamma: bool = False\n add_bias_linear: bool = True\n gated_linear_unit: bool = False\n activation_func: Callable = F.gelu\n num_moe_experts: int = None\n\n # initialization\n init_method: Callable = None\n output_layer_init_method: Callable = None\n init_method_std: float = 0.02\n\n # mixed-precision\n apply_query_key_layer_scaling: bool = True\n attention_softmax_in_fp32: bool = True\n\n # communication\n\n # fusion\n bias_gelu_fusion: bool = False # TODO: this should be bias_activation_fusion ?\n masked_softmax_fusion: bool = False\n persist_layer_norm: bool = False\n bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion?\n\n # activation recomputation\n recompute_granularity: str = None\n recompute_method: str = None\n recompute_num_layers: int = None\n distribute_saved_activations: bool = None\n\n # fp8 related\n fp8: str = None\n fp8_margin: int = 0\n fp8_interval: int = 1\n fp8_amax_history_len: int = 1\n fp8_amax_compute_algo: str = \"most_recent\"\n fp8_wgrad: bool = True\n\n # experimental section (TODO: move to apt. section above once stable)\n normalization: bool = \"LayerNorm\" # alt value supported by TE: \"RMSNorm\"\n\n def __post_init__(self):\n \"\"\" Python dataclass method that is used to modify attributes after initialization.\n See https://docs.python.org/3/library/dataclasses.html#post-init-processing for more details.\n \"\"\"\n super().__post_init__()\n if self.fp16 and self.bf16:\n raise ValueError(\n f'Only one of self.fp16: {self.fp16} and self.bf16 {self.bf16} should be True.'\n )\n\n if self.num_attention_heads % self.tensor_model_parallel_size != 0:\n raise ValueError(\n f\"num_attention_heads ({self.num_attention_heads}) must be a multiple of \"\n f\"tensor_model_parallel_size ({self.tensor_model_parallel_size}).\"\n )\n\n if self.ffn_hidden_size is None:\n self.ffn_hidden_size = 4 * self.hidden_size\n\n if self.kv_channels is None:\n self.kv_channels = self.hidden_size // self.num_attention_heads\n\n if self.num_query_groups is None:\n self.num_query_groups = self.num_attention_heads\n\n if self.num_query_groups % self.tensor_model_parallel_size != 0:\n raise ValueError(\n f\"num_query_groups ({self.num_query_groups}) must be a multiple of \"\n f\"tensor_model_parallel_size ({self.tensor_model_parallel_size}).\"\n )\n\n if self.apply_query_key_layer_scaling:\n self.attention_softmax_in_fp32 = True\n\n if self.expert_model_parallel_size > 1 and self.num_moe_experts is None:\n raise ValueError(f'num_moe_experts must be non None to use expert-parallel.')\n\n if self.recompute_granularity is not None:\n if not self.recompute_granularity in ['full', 'selective']:\n raise ValueError(\n f'When using recompute_granuarlity: {self.recompute_granularity} must be \"full\" or \"selective\".'\n )\n\n if self.recompute_method is not None:\n if not self.recompute_method in ['block', 'uniform']:\n raise ValueError(\n f'recompute_method: {self.recompute_method} must be \"block\" or \"uniform\".'\n )\n elif self.recompute_granularity != 'selective':\n raise 
ValueError(\n f'Using recompute_granularity: {self.recompute_granularity} so recompute_method must be \"block\" or \"uniform\"'\n )\n\n if self.recompute_granularity != 'selective' and self.recompute_num_layers is None:\n raise ValueError(\n f'When using recompute_granularity: {self.recompute_granularity} recompute_num_layers must be between '\n f'1 and num_layers_per_pipeline_rank: {self.num_layers // self.pipeline_model_parallel_size}'\n )\n elif (\n self.recompute_granularity == 'selective' and self.recompute_num_layers is not None\n ):\n raise ValueError(\n f'When using recompute_granularity: {self.recompute_granularity} recompute_num_layers must be None.'\n )\n\n if self.distribute_saved_activations and self.sequence_parallel:\n raise ValueError(\n f'distribute_saved_activations: {self.distribute_saved_activations} must be false when sequence parallel is enabled: {self.sequence_parallel}'\n )\n\n if self.virtual_pipeline_model_parallel_size is not None:\n if not self.num_layers % self.virtual_pipeline_model_parallel_size == 0:\n raise ValueError(\n f'num_layers: {self.num_layers} must be divisible by virtual_model_parallel_size {self.virtual_pipeline_model_parallel_size}'\n )\n\n if self.apply_query_key_layer_scaling:\n self.attention_softmax_in_fp32 = True\n\n if self.bias_gelu_fusion:\n if not self.add_bias_linear:\n raise ValueError(\n \"When bias_gelu_fusion is True, add_bias_linear must also be True.\"\n )\n\n if self.activation_func != F.gelu:\n raise ValueError(f'When bias_gelu_fusion is True, activation_func must be F.gelu.')\n\n if self.init_method is None:\n self.init_method = init_method_normal(self.init_method_std)\n\n if self.output_layer_init_method is None:\n self.output_layer_init_method = scaled_init_method_normal(\n self.init_method_std, self.num_layers\n )"
},
{
"identifier": "TransformerLayerSubmodules",
"path": "megatron/core/transformer/transformer_layer.py",
"snippet": "class TransformerLayerSubmodules:\n input_layernorm: Union[ModuleSpec, type] = IdentityOp\n self_attention: Union[ModuleSpec, type] = IdentityOp\n self_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_cross_attn_layernorm: Union[ModuleSpec, type] = IdentityOp\n cross_attention: Union[ModuleSpec, type] = IdentityOp\n cross_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_mlp_layernorm: Union[ModuleSpec, type] = IdentityOp\n mlp: Union[ModuleSpec, type] = IdentityOp\n mlp_bda: Union[ModuleSpec, type] = IdentityFuncOp"
},
{
"identifier": "Utils",
"path": "tests/unit_tests/test_utilities.py",
"snippet": "class Utils:\n\n world_size = torch.cuda.device_count()\n rank = int(os.environ['LOCAL_RANK'])\n\n @staticmethod\n def initialize_distributed():\n print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}')\n torch.cuda.set_device(Utils.rank % torch.cuda.device_count())\n init_method = 'tcp://'\n master_ip = os.getenv('MASTER_ADDR', 'localhost')\n master_port = os.getenv('MASTER_PORT', '6000')\n init_method += master_ip + ':' + master_port\n torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method)\n \n @staticmethod\n def destroy_model_parallel():\n ps.destroy_model_parallel()\n torch.distributed.barrier()\n\n @staticmethod\n def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None):\n ps.destroy_model_parallel()\n if not torch.distributed.is_initialized():\n Utils.initialize_distributed()\n ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank)"
}
] | from dataclasses import dataclass, fields
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules
from megatron.core.transformer.custom_layers.transformer_engine import (
TEDotProductAttention,
TELayerNormColumnParallelLinear,
TENorm,
TERowParallelLinear,
)
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp
from megatron.core.transformer.spec_utils import ModuleSpec, build_module, import_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import TransformerLayerSubmodules
from tests.unit_tests.test_utilities import Utils
import pytest
import torch
import transformer_engine as te | 8,079 | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
class TestSpecCustomization:
    def setup_method(self, method):
        Utils.initialize_model_parallel(1, 1)
        model_parallel_cuda_manual_seed(123)
        self.config = TransformerConfig(
            num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
        )

        # specify Transformer Layer spec with all identity ops
        self.transformer_layer_spec = TransformerLayerSubmodules()

        # specify attention spec using already imported class
        self.attention_spec = ModuleSpec(
            module=SelfAttention,
            params={"attn_mask_type": AttnMaskType.causal},
            submodules=SelfAttentionSubmodules(
                linear_qkv=TELayerNormColumnParallelLinear,
                dot_product_attention=TEDotProductAttention,
                linear_proj=TERowParallelLinear
            ),
        )

        # specify layernorm spec with module path to test dynamic importing
        self.layernorm_spec = ModuleSpec(
            module=("megatron.core.transformer.custom_layers.transformer_engine", "TENorm"),
        )

        # specify bias dropout add with module path
        self.bda_spec = ModuleSpec(
            module=("megatron.core.fusions.fused_bias_dropout", "get_bias_dropout_add")
        )

    def teardown_method(self, method):
        Utils.destroy_model_parallel()

    def test_import_module(self):
        self_attention_cls = import_module(
            module_path=('megatron.core.transformer.attention', 'SelfAttention')
        )
        assert id(self_attention_cls) == id(SelfAttention)

        layernorm_cls = import_module(module_path=self.layernorm_spec.module)
        assert id(layernorm_cls) == id(TENorm)

    def test_build_module(self):
        # Check NoOp TransformerLayer
        random_input = 12
        noop_transformer_layer = [
            build_module(getattr(self.transformer_layer_spec, field.name))
            for field in fields(self.transformer_layer_spec)
        ]
        x = random_input
        for mod in noop_transformer_layer:
            # checking for `IdentityFuncOp` before `IdentityOp` because former
            # is derived from the latter and so the second if statement will
            # always be `True`.
            if isinstance(mod, IdentityFuncOp):
                x = mod()(x)
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
class TestSpecCustomization:
    def setup_method(self, method):
        Utils.initialize_model_parallel(1, 1)
        model_parallel_cuda_manual_seed(123)
        self.config = TransformerConfig(
            num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
        )

        # specify Transformer Layer spec with all identity ops
        self.transformer_layer_spec = TransformerLayerSubmodules()

        # specify attention spec using already imported class
        self.attention_spec = ModuleSpec(
            module=SelfAttention,
            params={"attn_mask_type": AttnMaskType.causal},
            submodules=SelfAttentionSubmodules(
                linear_qkv=TELayerNormColumnParallelLinear,
                dot_product_attention=TEDotProductAttention,
                linear_proj=TERowParallelLinear
            ),
        )

        # specify layernorm spec with module path to test dynamic importing
        self.layernorm_spec = ModuleSpec(
            module=("megatron.core.transformer.custom_layers.transformer_engine", "TENorm"),
        )

        # specify bias dropout add with module path
        self.bda_spec = ModuleSpec(
            module=("megatron.core.fusions.fused_bias_dropout", "get_bias_dropout_add")
        )

    def teardown_method(self, method):
        Utils.destroy_model_parallel()

    def test_import_module(self):
        self_attention_cls = import_module(
            module_path=('megatron.core.transformer.attention', 'SelfAttention')
        )
        assert id(self_attention_cls) == id(SelfAttention)

        layernorm_cls = import_module(module_path=self.layernorm_spec.module)
        assert id(layernorm_cls) == id(TENorm)

    def test_build_module(self):
        # Check NoOp TransformerLayer
        random_input = 12
        noop_transformer_layer = [
            build_module(getattr(self.transformer_layer_spec, field.name))
            for field in fields(self.transformer_layer_spec)
        ]
        x = random_input
        for mod in noop_transformer_layer:
            # checking for `IdentityFuncOp` before `IdentityOp` because former
            # is derived from the latter and so the second if statement will
            # always be `True`.
            if isinstance(mod, IdentityFuncOp):
x = mod()(x) | elif isinstance(mod, IdentityOp): | 10 | 2023-12-07 08:29:38+00:00 | 12k |
mitrefireline/simharness | simharness2/environments/reactive_marl.py | [
{
"identifier": "ReactiveHarnessAnalytics",
"path": "simharness2/analytics/harness_analytics.py",
"snippet": "class ReactiveHarnessAnalytics(RLHarnessAnalytics):\n \"\"\"TODO Add description.\"\"\"\n\n def __init__(\n self,\n *,\n sim: FireSimulation,\n sim_analytics_partial: partial,\n agent_ids: set,\n benchmark_sim: FireSimulation = None,\n ) -> None:\n \"\"\"TODO Add summary line.\n\n Arguments:\n sim: The underlying `FireSimulation` object that contains the agent (s) that\n are being trained. The agent (s) will place mitigation lines, and the\n simulation will spread the fire. An episode terminates when the fire is\n finished spreading.\n sim_analytics_partial: A `functools.partial` object that defines the class\n that willbbe used to monitor and track `self.sim`, and\n `self.benchmark_sim`, if the optional `benchmark_sim` is provided. The\n user is expected to provide the `agent_analytics_partial` keyword\n argument, along with a valid value.\n agent_ids: TODO\n benchmark_sim: A separate `FireSimulation` object, identical to\n `sim` (after initialization). No mitigation lines will be placed in this\n simulation, as it does not contain any agent (s).\n\n Raises:\n TypeError: If `sim_analytics_partial.keywords` does not contain a\n `agent_analytics_partial` key with value of type `functools.partial`.\n\n \"\"\"\n # NOTE: Below is a hacky way to specify agent ids; Fix later\n # Inject `agent_ids` into keywords of `agent_analytics_partial`\n agent_partial: partial = sim_analytics_partial.keywords[\"agent_analytics_partial\"]\n agent_partial.keywords.update({\"agent_ids\": agent_ids})\n sim_analytics_partial.keywords[\"agent_analytics_partial\"] = agent_partial\n # Initialize sim_analytics object (s) and best_episode_performance attribute.\n super().__init__(\n sim=sim,\n sim_analytics_partial=sim_analytics_partial,\n benchmark_sim=benchmark_sim,\n )\n\n # Define attributes that are needed/accessed within `ComprehensiveReward` class.\n # TODO: Address where these attributes should be stored, see\n # https://gitlab.mitre.org/fireline/reinforcementlearning/simharness2/-/merge_requests/6#note_1504742\n\n if self.benchmark_sim_analytics:\n # track the existence of the benchmark sim to generate the comparative (ex. area saved or burn rate reduction) metrics\n self.sim_analytics.benchmark_exists = True\n\n # Track the latest episode reward\n # TODO is this the reward for the latest timestep or the latest episode?\n # FIXME: Decide how and where this attribute is/should be used.\n self.latest_reward = 0.0\n\n self.episodes_total = 0\n\n def update_after_one_agent_step(\n self,\n *,\n timestep: int,\n agents: Dict[Any, ReactiveAgent],\n ) -> None:\n \"\"\"Updates `self.sim_analytics.agent_analytics`, if agents are in the sim.\n\n This method is intended to be called directly after the call to\n `ReactiveHarness._do_one_agent_step()` (within `ReactiveHarness.step()`).\n\n Arguments:\n sim: The underlying `FireSimulation` object that contains the agent (s) that\n are being trained. The agent (s) will place mitigation lines, and the\n simulation will spread the fire. An episode terminates when the fire is\n finished spreading. 
(FIXME later)\n timestep: An integer indicating the current timestep of the episode.\n agents: TODO\n \"\"\"\n if self.sim_analytics.agent_analytics:\n self.sim_analytics.agent_analytics.update(timestep, agents)\n\n def update_after_one_simulation_step(self, *, timestep: int) -> None:\n \"\"\"Updates `self.sim_analytics` (and `self.benchmark_sim_analytics`, if exists).\n\n This method is intended to be called directly after the call to\n `ReactiveHarness._do_one_simulation_step()` (within `ReactiveHarness.step()`).\n\n Arguments:\n timestep: An integer indicating the current timestep of the episode.\n \"\"\"\n sim_area = self.sim_analytics.sim.fire_map.size\n\n if self.sim_analytics.benchmark_exists:\n # update the sim metrics with comparison metrics that use the benchmark sim in sim_analytics\n self.sim_analytics.update(\n timestep, benchmark_data=self.benchmark_sim_analytics.data.damaged\n )\n else:\n self.sim_analytics.update(timestep)\n\n def update_bench_after_one_simulation_step(self, *, timestep: int) -> None:\n \"\"\"Updates `self.benchmark_sim_analytics`, if exists.\n\n This method is intended to be called at the beginning of each episode in\n ReactiveHarness.\n\n Arguments:\n timestep: An integer indicating the current timestep of the episode.\n \"\"\"\n\n if self.benchmark_sim_analytics:\n self.benchmark_sim_analytics.update(timestep)\n\n return\n\n def update_after_one_harness_step(\n self, sim_run: bool, terminated: bool, reward: float, *, timestep: int\n ) -> None:\n \"\"\"Update the analytics after one step in the harness.\n\n Args:\n sim_run (bool): [description]\n terminated (bool): [description]\n reward (float): [description]\n timestep (int): [description]\n \"\"\"\n # Reset any attributes that monitor agent behavior between each simulation step.\n if sim_run and self.sim_analytics.agent_analytics:\n self.sim_analytics.agent_analytics.reset_after_one_simulation_step()\n\n # Once episode has terminated, check if episode performance is the best so far.\n if terminated:\n self.episodes_total += 1\n\n current_unburned = self.sim_analytics.data.unburned\n update_best_episode_performance = True\n if self.best_episode_performance:\n max_unburned = self.best_episode_performance.max_unburned\n if current_unburned <= max_unburned:\n update_best_episode_performance = False\n\n if update_best_episode_performance:\n self.best_episode_performance = BestEpisodePerformance(\n max_unburned=current_unburned,\n sim_area=self.sim_analytics.sim.fire_map.size,\n num_sim_steps=self.sim_analytics.num_sim_steps,\n episode=self.episodes_total,\n reward=reward,\n )\n perf = self.best_episode_performance\n logger.info(f\"Episode {self.episodes_total}: {perf}\")\n\n def reset(self, env_is_rendering: bool = False):\n \"\"\"Resets attributes that track data within each episode.\n\n This method is intended to be called within after the call to\n `ReactiveHarness._do_one_agent_step()` (within `ReactiveHarness.step()`).\n\n \"\"\"\n\n self.sim_analytics.reset(env_is_rendering)\n if self.benchmark_sim_analytics is not None:\n self.sim_analytics.benchmark_exists = True\n\n if self.benchmark_sim_analytics:\n self.benchmark_sim_analytics.reset(env_is_rendering)\n\n def save_sim_history(self, logdir: str, total_iters: int) -> None:\n \"\"\"TODO Add docstring.\"\"\"\n self.sim_analytics.data.save_episode_history(logdir, total_iters)\n\n if self.benchmark_sim_analytics:\n self.benchmark_sim_analytics.data.save_episode_history(logdir, total_iters)\n\n # def log_dfs(self):\n # \"\"\"Log the dataframes that are 
being tracked by the analytics.\"\"\"\n # logger.info(\"sim_analytics.df\")\n # logger.info(self.sim_analytics.df.to_markdown())\n # if self.benchmark_sim_analytics:\n # logger.info(\"benchmark_sim_analytics.df\")\n # logger.info(self.benchmark_sim_analytics.df.to_markdown())\n\n # if self.sim_analytics.agent_analytics:\n # logger.info(\"sim_analytics.agent_analytics.df\")\n # logger.info(self.sim_analytics.agent_analytics.df.to_markdown())"
},
{
"identifier": "RLHarness",
"path": "simharness2/environments/rl_harness.py",
"snippet": "class RLHarness(MultiAgentEnv, ABC):\n \"\"\"`Simulation` wrapper enabling RL agent's to interact with different simulators.\n\n The most important API methods a RLHarness exposes are `step()`, `reset()`,\n `render()`, and `close()`.\n\n Longer class information... FIXME.\n Longer class information... FIXME.\n\n Attributes:\n sim: A subclass of `Simulation` that defines a given simulator.\n movements: A list containing the movements available to a given agent. For\n example, possible movements could be: [\"up\", \"down\", \"left\", \"right\"].\n interactions: A list containing the interactions available to a given agent.\n For example, if the sim IS-A `FireSimulation`, possible interactions\n could be: [\"fireline\", \"scratchline\", \"wetline\"]. To learn more, see\n https://gitlab.mitre.org/fireline/simulators/simfire/-/blob/main/simfire/sim/simulation.py#L269-280\n attributes: (FIXME) A list containing the input features into the observations.\n Each feature is a channel in the input observation.\n normalized_attributes: A list containing attributes that need to be normalized.\n Any and all values within `normalized_attributes` must exist in `attributes`!\n deterministic: A boolean indicating whether the initial state of the environment\n is deterministic.\n sim_agent_id: FIXME.\n harness_to_sim: FIXME.\n sim_to_harness: FIXME.\n min_maxes: FIXME.\n low: FIXME.\n high: FIXME.\n observation_space: FIXME.\n action_space: FIXME.\n sim_attributes: FIXME.\n nonsim_attributes: FIXME.\n \"\"\"\n\n def __init__(\n self,\n sim: FireSimulation,\n movements: List[str],\n interactions: List[str],\n attributes: List[str],\n normalized_attributes: List[str],\n action_space_cls: Callable,\n deterministic: bool = False,\n benchmark_sim: FireSimulation = None,\n num_agents: int = 1,\n ) -> None:\n \"\"\"Inits RLHarness with blah FIXME.\n\n Longer method information...\n Longer method information...\n\n Raises:\n AssertionError: FIXME.\n\n FIXME: Ideally, the docstr should encapsulate what is being initialized and any\n intuition behind design choices. This is relatively important since RLHarness\n serves as a base class that each environment will inherit from.\n \"\"\"\n # NOTE: The caller is responsible for creating the `FireSimulation` object (s),\n # and if a `benchmark_sim` is provided, it should be a separate object, identical\n # to `sim` (after initialization), but will not receive any mitigations.\n self.sim = sim\n self.benchmark_sim = benchmark_sim\n # Indicates (internally) whether a benchmark simulation should be used\n # FIXME: I'm not sure if we need `_use_benchmark_sim`, since we can just check\n # if `benchmark_sim` is None or not.\n # self._use_benchmark_sim = True if benchmark_sim else False\n # TODO Create self._time_arg_passed_to_sim_run and set default value to 1. 
This\n # would allow the simulation to be run for an arbitrary number of timesteps.\n\n # TODO: use more apt name, ex: `available_movements`, `possible_movements`.\n self.movements = copy.deepcopy(movements)\n # TODO: use more apt name, ex: `available_interactions`, `possible_interactions`.\n self.interactions = copy.deepcopy(interactions)\n self.attributes = attributes\n # TODO: Maybe use `attributes_to_normalize` over `normalized_attributes`?\n self.normalized_attributes = normalized_attributes\n # FIXME: remove `deterministic` from the constructor; externally randomize env.\n self.deterministic = deterministic\n\n if not set(self.normalized_attributes).issubset(self.attributes):\n raise AssertionError(\n f\"All normalized attributes ({str(self.normalized_attributes)}) must be \"\n f\"in attributes ({str(self.attributes)})!\"\n )\n\n # Retrieve the observation space and action space for the simulation.\n sim_attributes = self.sim.get_attribute_data()\n sim_actions = self.sim.get_actions()\n\n # Before verifying that all interactions are supported by the simulator, we need\n # to remove the \"none\" interaction (if it exists).\n if \"none\" in self.interactions:\n none_idx = self.interactions.index(\"none\")\n interaction_types = (\n self.interactions[:none_idx] + self.interactions[none_idx + 1 :]\n )\n else:\n interaction_types = self.interactions\n\n if not set(interaction_types).issubset(list(sim_actions.keys())):\n raise AssertionError(\n f\"All interactions ({str(interaction_types)}) must be \"\n f\"in the simulator's actions ({str(list(sim_actions.keys()))})!\"\n )\n\n # NOTE: `self.harness_to_sim` used in `ReactiveHarness._update_mitigation()`.\n # FIXME `self.sim_to_harness` is NOT used anywhere else.\n self._separate_sim_nonsim(sim_attributes)\n self.harness_to_sim, self.sim_to_harness = self._sim_harness_conv(sim_actions)\n\n self.num_agents = num_agents\n # Each sim_agent_id is used to \"encode\" the agent position within the `fire_map`\n # dimension of the returned observation of the environment. The intention is to\n # help the model learn/use the location of the respective agent on the fire_map.\n # NOTE: Assume that every simulator will support 3 base scenarios:\n # 1. Untouched (Ex: simfire.enums.BurnStatus.UNBURNED)\n # 2. Currently Being Affected (Ex: simfire.enums.BurnStatus.BURNING)\n # 3. 
Affected (Ex: simfire.enums.BurnStatus.BURNED)\n # The max value is +1 of the max mitigation value available (wrt the sim).\n self._agent_id_start = max(self.harness_to_sim.values()) + 1\n self._agent_id_stop = self._agent_id_start + self.num_agents\n self._sim_agent_ids = np.arange(self._agent_id_start, self._agent_id_stop)\n # FIXME: Usage of \"agent_{}\" doesn't allow us to delineate agents groups.\n self._agent_ids = {f\"agent_{i}\" for i in self._sim_agent_ids}\n\n self.min_maxes = self._get_min_maxes()\n # NOTE: calling `reshape()` to switch to channel-minor format.\n channel_lows = np.array(\n [[[self.min_maxes[channel][\"min\"]]] for channel in self.attributes]\n ).reshape(1, 1, len(self.attributes))\n channel_highs = np.array(\n [[[self.min_maxes[channel][\"max\"]]] for channel in self.attributes]\n ).reshape(1, 1, len(self.attributes))\n\n self._low = np.repeat(\n np.repeat(channel_lows, self.sim.config.area.screen_size[0], axis=1),\n self.sim.config.area.screen_size[0],\n axis=0,\n )\n self._high = np.repeat(\n np.repeat(channel_highs, self.sim.config.area.screen_size[0], axis=1),\n self.sim.config.area.screen_size[0],\n axis=0,\n )\n\n # Provide full (preferred format) observation- and action-spaces as Dicts\n # mapping agent IDs to the individual agents' spaces.\n # TODO: Should we pass `seed` to seed the RNG used to sample from the space?\n self._obs_space_in_preferred_format = True\n obs_space = spaces.Box(self._low, self._high, dtype=np.float32)\n self.observation_space = spaces.Dict(\n {agent_id: obs_space for agent_id in self._agent_ids}\n )\n\n self._action_space_in_preferred_format = True\n action_shape = self._get_action_space_shape(space_type=action_space_cls)\n action_space = action_space_cls(action_shape)\n self.action_space = spaces.Dict(\n {agent_id: action_space for agent_id in self._agent_ids}\n )\n\n @no_type_check\n @abstractmethod\n def reset(\n self,\n *,\n seed: Optional[int] = None,\n options: Optional[Dict[Any, Any]] = None,\n ) -> Tuple[Dict[Any, np.ndarray], Dict[Any, Dict[Any, Any]]]:\n \"\"\"Resets the environment to an initial state.\n\n This method generates a new starting state often with some randomness to ensure\n that the agent explores the state space and learns a generalized policy about the\n environment. This randomness can be controlled with the `seed` parameter.\n\n Subclasses, such as the ReactiveHarness, typically do the following within\n the overriden reset() method:\n 1. set `self.num_burned = 0`.\n 2. handle `self.deterministic`\n 3. set `output = super().reset()`, which executes the below code and sets\n `output` (in child class reset()) to the return value, `self.state`.\n\n Arguments:\n seed: The (optional) int seed that is used to initialize the environment's\n PRNG (np_random). 
If the environment does not already have a PRNG and\n `seed=None` (the default option) is passed,\n\n Returns:\n An ndarray containing the initial state of the environment.\n \"\"\"\n super().reset(seed=seed, options=options)\n\n @abstractmethod\n def step(\n self, action_dict: Dict[Any, np.ndarray]\n ) -> Tuple[\n Dict[Any, np.ndarray],\n Dict[Any, float],\n Dict[Any, bool],\n Dict[Any, bool],\n Dict[Any, Dict[Any, Any]],\n ]:\n \"\"\"Run one timestep of the environment's dynamics.\n\n When end of episode is reached (`terminated or truncated` is True), you are\n responsible for calling `reset()` to reset the environment's state for the next\n episode.\n\n Arguments:\n action: An action provided by the agent to update the environment state.\n\n Returns:\n observation: A ndarray containing the observation of the environment.\n reward: A float representing the reward obtained as a result of taking the\n action.\n terminated: A boolean indicating whether the agent reaches the terminal state\n (as defined under the MDP of the task) which can be positive or negative.\n An example is reaching the goal state, or moving into the lava from the\n Sutton and Barton, Gridworld. If true, the user needs to call `reset()`.\n truncated: A boolean indicating whether the truncation condition outside\n the scope of the MDP is satisfied. Typically, this is a timelimit, but\n could also be used to indicate an agent physically going out of bounds.\n Can be used to end the episode prematurely before a terminal state is\n reached. If true, the user needs to call `reset()`.\n info: A dictionary containing auxiliary diagnostic information (helpful for\n debugging, learning, and logging). This might, for instance, contain:\n - metrics that describe the agent's performance state\n - variables that are hidden from observations, or\n - individual reward terms that are combined to produce the total\n reward.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def render(self) -> None:\n \"\"\"Render a visualization of the environment.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_nonsim_attribute_data(self) -> OrderedDict[str, np.ndarray]:\n \"\"\"Get data that does not come from the simulation.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_nonsim_attribute_bounds(self) -> OrderedDict[str, Dict[str, int]]:\n \"\"\"Get bounds for data that does not come from the simulation.\"\"\"\n raise NotImplementedError\n\n def _get_status_categories(self, disaster_categories: List[str]) -> List[str]:\n \"\"\"Get disaster categories that aren't interactions.\n\n Arguments:\n disaster_categories (List[str]): List of potential Simulation space categories\n\n Returns:\n A list containing disaster categories (str), with interactions removed.\n \"\"\"\n categories = []\n for cat in disaster_categories:\n if cat not in self.interactions:\n categories.append(cat)\n return categories\n\n def _separate_sim_nonsim(self, sim_attributes: OrderedDict[str, np.ndarray]) -> None:\n \"\"\"Separate attributes based on if they are supported by the Simulation or not.\n\n Arguments:\n sim_attributes: An ordered dictionary linking all attributes of\n the Simulation to their respective data within the Sim.\n \"\"\"\n self.sim_attributes = []\n self.nonsim_attributes = []\n for attribute in self.attributes:\n if attribute not in sim_attributes.keys():\n self.nonsim_attributes.append(attribute)\n else:\n self.sim_attributes.append(attribute)\n\n def _sim_harness_conv(\n self, sim_actions: Dict[str, IntEnum]\n ) -> 
Tuple[OrderedDict[int, int], OrderedDict[int, int]]:\n \"\"\"Create conversion dictionaries for action (Sim) <-> interaction (Harness).\n\n Arguments:\n sim_actions: A dictionary mapping the action/mitigation strategies available\n to the corresponding `Enum` value within the simulation. FIXME update wording\n\n Returns:\n A tuple containing two ordered dictionaries for attribute conversion. The\n first will map interaction to action. and the second will map action to\n interaction.\n \"\"\"\n # NOTE: hts == \"harness_to_sim\" and sth == \"sim_to_harness\"\n hts_action_conv = ordered_dict()\n sth_action_conv = ordered_dict()\n\n actions = self.interactions\n if len(actions) > 0:\n # Using the \"valid\" interaction_types, populate the conversion dicts.\n valid_idxs = [actions.index(act) for act in actions if act != \"none\"]\n\n for idx in valid_idxs:\n interaction = self.interactions[idx]\n hts_action_conv[idx] = sim_actions[interaction].value\n sth_action_conv[sim_actions[interaction].value] = idx\n\n return hts_action_conv, sth_action_conv\n\n def _select_from_dict(\n self, dictionary: OrderedDict[str, Any], selections: List[str]\n ) -> OrderedDict[str, Any]:\n \"\"\"Create an ordered subset with only specific keys from the input `dictionary`.\n\n Arguments:\n dictionary: A dictionary used to extract values from.\n selections: A list containing the desired keys to keep from `dictionary`.\n\n Returns:\n An ordered dictionary containing a subset of the input `dictionary`.\n \"\"\"\n return_dict = OrderedDict()\n\n for selection in selections:\n return_dict[selection] = dictionary[selection]\n\n return return_dict\n\n def _get_min_maxes(self) -> OrderedDict[str, Dict[str, Tuple[int, int]]]:\n \"\"\"Retrieves the minimum and maximum for all relevant attributes.\"\"\"\n # FIXME (afennelly) I think the return type should be:\n # - OrderedDict[str, Dict[str, object]]\n # TODO update docstring to be more specific\n # TODO add comments and refactor as needed\n sim_min_maxes = ordered_dict()\n # fetch the observation space bounds for the simulation.\n sim_bounds = self.sim.get_attribute_bounds()\n for attribute in self.sim_attributes:\n sim_min_maxes[attribute] = sim_bounds[attribute]\n\n nonsim_min_maxes = self._select_from_dict(\n self.get_nonsim_attribute_bounds(), self.nonsim_attributes\n )\n\n if len(nonsim_min_maxes) != len(self.nonsim_attributes):\n raise AssertionError(\n f\"Min-Maxes for {len(nonsim_min_maxes)} nonsim attributes were given but \"\n f\"there are {len(self.nonsim_attributes)} nonsim attributes.\"\n )\n\n min_maxes = ordered_dict({**sim_min_maxes, **nonsim_min_maxes})\n\n return min_maxes\n\n def _normalize_obs(\n self, observations: Dict[str, np.ndarray]\n ) -> Dict[str, np.ndarray]:\n \"\"\"Convert an observation to the [0,1] range based on known min and max.\"\"\"\n\n def normalize(data, min_max):\n # FIXME: Explain purpose/intention behind using a nested class here.\n return (data - min_max[\"min\"]) / (min_max[\"max\"] - min_max[\"min\"])\n\n for attribute in self.normalized_attributes:\n observations[attribute] = normalize(\n observations[attribute], self.min_maxes[attribute]\n )\n\n return observations\n\n def _get_action_space_shape(\n self, space_type: spaces.Space\n ) -> Union[int, np.ndarray, List]:\n \"\"\"Get the shape of the action space, dependent on the action space type.\n\n Args:\n space_type (spaces.Space): [description]\n\n Raises:\n NotImplementedError: [description]\n\n Returns:\n Union[int, np.ndarray, List]: [description]\n \"\"\"\n if space_type is 
spaces.Discrete:\n return len(self.movements) * len(self.interactions)\n elif space_type is spaces.MultiDiscrete:\n return [len(self.movements), len(self.interactions)]\n else:\n # TODO provide a descriptive error message.\n raise NotImplementedError"
},
{
"identifier": "BaseReward",
"path": "simharness2/rewards/base_reward.py",
"snippet": "class BaseReward(ABC):\n \"\"\"Abstract Class for Reward_Class template with the update functions implemented.\"\"\"\n\n def __init__(self, harness_analytics: ReactiveHarnessAnalytics):\n \"\"\"TODO Add constructor docstring.\"\"\"\n # reference to the harness_analytics object within the environment\n self.harness_analytics = harness_analytics\n # helper variable indicating the total number of squares in the simulation map\n self._sim_area = (\n self.harness_analytics.sim_analytics.sim.config.area.screen_size[0] ** 2\n )\n\n @abstractmethod\n def get_reward(self, *, timestep: int, sim_run: bool) -> float:\n \"\"\"TODO Add docstring.\"\"\"\n pass\n\n @abstractmethod\n def get_timestep_intermediate_reward(self, timestep: int) -> float:\n \"\"\"TODO Add docstring.\"\"\"\n pass"
},
{
"identifier": "ReactiveAgent",
"path": "simharness2/agents/agent.py",
"snippet": "class ReactiveAgent:\n \"\"\"A simple agent that reacts to its environment.\n\n FIXME: update docstring style, using llama2 suggestion for now.\n Parameters\n ----------\n agent_id : int\n The unique ID of this agent.\n sim_id : int\n The unique ID of the simulation this agent belongs to.\n initial_position : tuple[int, int]\n The (x,y) starting position of the agent, where (0,0) is the top-left corner of\n the map and (max_x, max_y) is the bottom-right corner of the map.\n\n Properties\n ----------\n x : int\n The current X coordinate of the agent.\n y : int\n The current Y coordinate of the agent.\n row : int\n The current row number where the agent resides.\n col : int\n The current column number where the agent resides.\n latest_movement : str or None\n The last movement made by the agent, if applicable.\n latest_interaction : str or None\n The last interaction had by the agent, if applicable.\n mitigation_placed : bool\n Whether the agent has placed any mitigations recently.\n moved_off_map : bool\n Whether the agent has moved off the map recently.\n\n \"\"\"\n\n # NOTE: `agent_speed` ommitted, only used within `_do_one_simulation_step`\n # Attrs that should be specified on initialization\n agent_id: Any # ex: \"agent_0\", \"dozer_0\", \"handcrew_0\", \"ff_0\", etc.\n sim_id: int # should be contained within sim.agents.keys()\n initial_position: Tuple[int, int]\n\n # Attributes with default values\n latest_movement: int = None\n latest_interaction: int = None\n mitigation_placed: bool = False\n moved_off_map: bool = False\n\n def __post_init__(self):\n self._current_position = self.initial_position\n self.x, self.y = self.initial_position\n self.row, self.col = self.y, self.x\n\n @property\n def current_position(self) -> Tuple[int, int]:\n return self._current_position\n\n @current_position.setter\n def current_position(self, value: Tuple[int, int]):\n self._current_position = value\n self.x, self.y = value\n self.row, self.col = self.y, self.x\n\n @property\n def x(self) -> int:\n return self._current_position[0]\n\n @x.setter\n def x(self, value: int):\n self._current_position = (value, self.y)\n\n @property\n def y(self) -> int:\n return self._current_position[1]\n\n @y.setter\n def y(self, value: int):\n self._current_position = (self.x, value)\n\n @property\n def row(self) -> int:\n return self._current_position[1]\n\n @row.setter\n def row(self, value: int):\n self._current_position = (self.x, value)\n\n @property\n def col(self) -> int:\n return self._current_position[0]\n\n @col.setter\n def col(self, value: int):\n self._current_position = (value, self.y)\n\n def reset(self):\n self.latest_movement = None\n self.latest_interaction = None\n self.mitigation_placed = False\n self.moved_off_map = False\n self.__post_init__()\n # self.current_position = self.initial_position\n # self.reward = 0\n\n # def move(self, env: np.ndarray, direction: int) -> bool:\n # \"\"\"Moves the agent in the given direction if possible.\"\"\"\n # current_x, current_y = self.current_position\n # dx, dy = self.actions[direction]\n # next_x, next_y = current_x + dx, current_y + dy\n\n # if env[next_y][next_x] == \"_\":\n # self.current_position = (next_x, next_y)\n # return True\n # else:\n # return False"
}
] | import logging
import os
import math
import copy
import numpy as np
from collections import OrderedDict as ordered_dict
from functools import partial
from typing import Any, Dict, List, Optional, OrderedDict, Tuple
from gymnasium import spaces
from gymnasium.envs.registration import EnvSpec
from ray.rllib.env.env_context import EnvContext
from simfire.enums import BurnStatus
from simfire.utils.config import Config
from simharness2.analytics.harness_analytics import ReactiveHarnessAnalytics
from simharness2.environments.rl_harness import RLHarness
from simharness2.rewards.base_reward import BaseReward
from simharness2.agents import ReactiveAgent | 10,603 | # Generate random agent locations for the start of the episode.
elif method == "random":
# Create a boolean mask of valid positions (i.e., inside the boundaries).
mask = np.ones(self.sim.fire_map.shape, dtype=bool)
# Agent (s) can only be spawned on an unburning square
# NOTE: Any other "prohibited" agent start locations can be specified here.
mask[np.where(self.sim.fire_map != BurnStatus.UNBURNED)] = False
# Randomly select unique positions from the valid ones.
idx = np.random.choice(range(mask.sum()), size=self.num_agents, replace=False)
flat_idx = np.argwhere(mask.flatten())[idx].flatten()
agent_locs = np.vstack(np.unravel_index(flat_idx, mask.shape)).T
# Populate the `self.agents` dict with `ReactiveAgent` object (s).
agent_ids = sorted(self._agent_ids, key=lambda x: int(x.split("_")[-1]))
sim_ids = self._sim_agent_ids
for agent_str, sim_id, loc in zip(agent_ids, sim_ids, agent_locs):
agent = ReactiveAgent(agent_str, sim_id, tuple(loc))
self.agents[agent_str] = agent
# This should be caught within the init. To be safe, also raise error here.
else:
raise NotImplementedError(f"Agent spawn method {method} not implemented.")
def _configure_env_rendering(self, should_render: bool) -> None:
"""Configure the environment's `FireSimulation` to be rendered (or not).
If the simulation should be rendered, then the `headless` parameter in the
simulation's config (file) should be set to `False`, enabling the usage of pygame.
Additionally, the environment's `_should_render` attribute is set to ensure
that rendering is active when desired. This is especially important when the
number of eval episodes, specified via `evaluation.evaluation_duration`, is >1.
"""
sim_data = self.sim.config.yaml_data
sim_data["simulation"]["headless"] = not should_render
# Update simulation's config attribute.
logger.info("Updating the `self.sim.config` with new `Config` object...")
self.sim.config = Config(config_dict=sim_data)
# Reset the simulation to ensure that the new config is used.
logger.info(f"Resetting `self.sim` to configure rendering == {should_render}.")
self.sim.reset()
# Update the simulation's rendering attribute to match the provided value.
if should_render:
logger.info("Setting SDL_VIDEODRIVER environment variable to 'dummy'...")
os.environ["SDL_VIDEODRIVER"] = "dummy"
self.sim.rendering = should_render
# Indicate whether the environment's `FireSimulation` should be rendered.
self._should_render = should_render
def _increment_evaluation_iterations(self) -> None:
"""Increment the number of evaluation iterations that have been run."""
self._num_eval_iters += 1
# def _set_agent_pos_for_episode_start(self):
# """Set the agent's initial position in the map for the start of the episode."""
# for agent_id in self._agent_ids:
# valid_pos = False
# # Keep looping until we get a valid position
# while not valid_pos:
# random_pos = self.np_random.integers(
# 0, self.sim.config.area.screen_size, size=2, dtype=int
# )
# valid_pos = self._check_start_pos(random_pos)
# self.agent_pos[agent_id] = random_pos
def _log_env_init(self):
"""Log information about the environment that is being initialized."""
if self._is_eval_env:
i, j = self.worker_idx, self.vector_idx
logger.warning(f"Object {hex(id(self))}: index (i+1)*(j+1) == {(i+1)*(j+1)}")
if not self._debug_mode:
return
# TODO: What log level should we use here?
logger.info(f"Object {hex(id(self))}: worker_index: {self.worker_idx}")
logger.info(f"Object {hex(id(self))}: vector_index: {self.vector_idx}")
logger.info(f"Object {hex(id(self))}: num_workers: {self.num_workers}")
logger.info(f"Object {hex(id(self))}: is_remote: {self.is_remote}")
def _log_env_reset(self):
"""Log information about the environment that is being reset."""
if not self._debug_mode or self._episodes_debugged > self._debug_duration:
return
# TODO: What log level should we use here?
for idx, feat in enumerate(self.attributes):
low, high = self._low[..., idx].min(), self._high[..., idx].max()
obs_min = round(self.state[..., idx].min(), 2)
obs_max = round(self.state[..., idx].max(), 2)
# Log lower bound of the (obs space) and max returned obs for each attribute.
logger.info(f"{feat} LB: {low}, obs min: {obs_min}")
# Log upper (lower) bounds of the returned observations for each attribute.
logger.info(f"{feat} UB: {high}, obs max: {obs_max}")
# Increment the number of episodes that have been debugged.
self._episodes_debugged += 1
def _setup_harness_analytics(self, analytics_partial: partial) -> None:
"""Instantiates the `harness_analytics` used to monitor this `ReactiveHarness` obj.
Arguments:
analytics_partial:
A `functools.partial` object that indicates the top-level
class that will be used to monitor the `ReactiveHarness` object. The user
is expected to provide the `sim_data_partial` keyword argument, along
with a valid value.
Raises:
TypeError: If `harness_analytics_partial.keywords` does not contain a
`sim_data_partial` key with value of type `functools.partial`.
"""
| """ReactiveHarness with support for mutiple agents operating simulanteously.
This module contains the `MARLReactiveHarness` environment, in which multiple agents
operate at the same time within the same environment. The code
is very similar to the single agent case, just multiplied for each agent's action. Agents
can be homogeneous or heterogeneous depending on the training run - meaning agents can
have the same speed/abilities or different ones.
The reward function used is configurable depending on the fire manager intent displayed
within the training config and corresponding reward class.
"""
# FIXME: Update logger configuration.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
)
logger.addHandler(handler)
logger.propagate = False
class MARLReactiveHarness(RLHarness): # noqa: D205,D212,D415
"""
### Description
Environment which portrays the case where a fire has already started and we are
deploying our resources to best mitigate the damage. Multiple agents are interacting
at once with the environment in a collaborative manner.
### Action Space
The action space type is `MultiDiscrete`, and each agent's `sample()` returns an
`np.ndarray` of shape `(2,)` drawn from `MultiDiscrete([M+1, I+1])`, where
`M == len(movements)` and `I == len(interactions)` as provided in the config.
- Movements refer to actions where the agent **traverses** the environment.
- For example, possible movements could be: ["up", "down", "left", "right"].
- Interactions refer to actions where the agent **interacts** with the environment.
- For example, if the sim IS-A `FireSimulation`, possible interactions
could be: ["fireline", "scratchline", "wetline"]. To learn more, see
[simulation.py](https://gitlab.mitre.org/fireline/simulators/simfire/-/blob/main/simfire/sim/simulation.py#L269-280).
- Actions are determined based on the provided (harness) config file.
- When `super()._init__()` is called, the option "none" is inserted to element 0 of
both `movements` and `interactions`, representing "don't move" and
"don't interact", respectively (this is the intuition for the +1 in the shape).
### Observation Space
The observation space type is `Box`, and `sample()` returns an `np.ndarray` of shape
`(X,X,A)`, where `A == len(ReactiveHarness.attributes)` and
`X == ReactiveHarness.sim.config.area.screen_size`.
- The value of `ReactiveHarness.sim.config.area.screen_size` is determined
based on the value of the `screen_size` attribute (within the `area` section) of
the (simulation) config file. See `simharness2/sim_registry.py` to find more info
about the `register_simulation()` method, which is used to register the simulation
class and set the config file path associated with a given simulation.
- The number of `attributes` is determined by the `attributes` attribute (within the
`RLHARNESS` section) of the (harness) config file. Each attribute must be contained
in the observation space returned for the respective `Simulation` class. The
locations within the observation are based on their corresponding location within
the array.
### Rewards
The agent is rewarded for saving the most land and reducing the amount of affected
area.
- TODO(afennelly) add more details about the reward function.
- TODO(afennelly) implement modular reward function configuration.
### Starting State
The initial map is set by data given from the Simulation.
- TODO(afennelly) add more details about the starting state.
### Episode Termination
The episode ends once the disaster is finished and it cannot spread any more.
- TODO(afennelly) add more details about the episode termination.
"""
def __init__(self, config: EnvContext) -> None:
"""See RLHarness (parent/base class)."""
# NOTE: We don't set a default value in `config.get` for required arguments.
# FIXME Most, if not all, of these can be moved into the RLHarness.
# TODO Should we make an RLlibHarness class to handle all these extras?
# Indicates that environment information should be logged at various points.
self._set_debug_options(config)
self._store_env_context(config)
# FIXME: Perform env setup depending on if the env is used for eval/train.
# Indicates whether the environment was created for evaluation purposes.
self._is_eval_env = config.get("is_evaluation_env", False)
if self._is_eval_env:
self._prepare_eval_env(config)
else:
self._prepare_train_env(config)
# Set the max number of steps that the environment can take before truncation
# self.spec.max_episode_steps = 1000
self.spec = EnvSpec(
id="MARLReactiveHarness-v0",
entry_point="simharness2.environments.reactive_marl:MARLReactiveHarness",
max_episode_steps=2000,
)
# Track the number of timesteps that have occurred within an episode.
self.timesteps: int = 0
action_space_partial: partial = config.get("action_space_partial")
# Ensure the provided `action_space_partial` has a `func` attribute.
if not isinstance(action_space_partial, partial):
raise TypeError(
f"Expected `action_space_partial` to be an instance of "
f"`functools.partial`, but got {type(action_space_partial)}."
)
super().__init__(
sim=config.get("sim"),
movements=config.get("movements"),
interactions=config.get("interactions"),
attributes=config.get("attributes"),
normalized_attributes=config.get("normalized_attributes"),
action_space_cls=action_space_partial.func,
deterministic=config.get("deterministic"),
benchmark_sim=config.get("benchmark_sim"),
num_agents=config.get("num_agents", 1),
)
self._log_env_init()
# Spawn the agent (s) that will interact with the simulation
logger.debug(f"Creating {self.num_agents} agent (s)...")
agent_init_method = config.get("agent_initialization_method", "automatic")
if agent_init_method == "manual":
agent_init_positions = config.get("initial_agent_positions", None)
if agent_init_positions is None:
raise ValueError(
"Must provide 'initial_agent_positions' when using 'manual' agent initialization method."
)
self._create_agents(method="manual", pos_list=agent_init_positions)
elif agent_init_method == "automatic":
self._create_agents(method="random")
else:
raise ValueError(
"Invalid agent initialization method. Must be either 'automatic' or 'manual'."
)
# NOTE: only used in `_do_one_simulation_step`, so keep as harness attr
self.agent_speed: int = config.get("agent_speed")
# If provided, construct the class used to monitor this `ReactiveHarness` object.
# FIXME Move into RLHarness
analytics_partial = config.get("harness_analytics_partial")
self._setup_harness_analytics(analytics_partial)
# If provided, construct the class used to perform reward calculation.
self._setup_reward_cls(reward_cls_partial=config.get("reward_cls_partial"))
# If the agent(s) places an effective mitigation (not placed in already damaged/mitigated square), this is set to True.
# FIXME Have this tracked across all of the agents
self.true_mitigation_placed: bool = False
# Bool to toggle the ability to terminate the agent simulation early if, at the current
# timestep, the agents have caused more burn damage (burned + burning) than the final
# state of the benchmark fire map.
# FIXME Have this value set in the configs
self._terminate_if_greater_damage = True
if self.benchmark_sim:
#Validate that benchmark and sim match seeds
assert self.sim.get_seeds() == self.benchmark_sim.get_seeds()
#create static list to store the episode benchsim firemaps
self.max_bench_length = 600
self.bench_firemaps = [0] * self.max_bench_length
#run the first benchmark sim to generate the benchmark sim firemaps and metrics for this episode
self._run_benchmark()
def _set_debug_options(self, config: EnvContext):
"""Set the debug options for the environment."""
self._debug_mode = config.get("debug_mode", False)
self._debug_duration = config.get("debug_duration", 1) # unit == episodes
self._episodes_debugged = 0
logger.debug(f"Initializing environment {hex(id(self))}")
def _store_env_context(self, config: EnvContext):
"""Store the environment context for later use."""
# When there are multiple workers created, this uniquely identifies the worker
# the env is created in. 0 for local worker, >0 for remote workers.
self.worker_idx = config.worker_index
# When there are multiple envs per worker, this uniquely identifies the env index
# within the worker. Starts from 0.
self.vector_idx = config.vector_index
# Whether individual sub-envs (in a vectorized env) are @ray.remote actors.
self.is_remote = config.remote
# Total number of (remote) workers in the set. 0 if only a local worker exists.
self.num_workers = config.num_workers
def _prepare_eval_env(self, config: EnvContext):
"""Prepare the environment for evaluation purposes."""
eval_duration = config.get("evaluation_duration")
if self.num_workers != 0:
if eval_duration and not (eval_duration / self.num_workers).is_integer():
raise ValueError(
f"The `evaluation_duration` ({eval_duration}) must be evenly "
f"divisible by the `num_workers` ({self.num_workers}.)"
)
# Indicates how many rounds of evaluation will be run using this environment.
self._total_eval_rounds = (
eval_duration / self.num_workers if eval_duration else 0
)
else:
# Eval will be run in the algorithm process, so no need to divide.
self._total_eval_rounds = eval_duration if eval_duration else 0
self._current_eval_round = 1
# Incremented on each call to `RenderEnv.on_evaluate_start()` callback, via the
# `_increment_evaluation_iterations()` helper method.
self._num_eval_iters = 0
self.fire_scenarios = config.get("scenarios", None)
def _prepare_train_env(self, config: EnvContext):
"""Prepare the environment for training purposes."""
# TODO Add any training-specific logic here
pass
def set_trial_results_path(self, path: str) -> None:
"""Set the path to the directory where (tune) trial results will be stored."""
self._trial_results_path = path
def step(
self, action_dict: Dict[Any, np.ndarray]
) -> Tuple[
Dict[Any, np.ndarray],
Dict[Any, float],
Dict[Any, bool],
Dict[Any, bool],
Dict[Any, Dict[Any, Any]],
]: # noqa FIXME
# TODO: Refactor to better utilize `RLHarness` ABC, or update the API.
# TODO: Can we parallelize this method? If so, how? I'm not sure if that
# will make sense wrt updating the sim, etc.?
for agent_id, agent in self.agents.items():
self._do_one_agent_step(agent, action_dict[agent_id])
if self.harness_analytics:
self.harness_analytics.update_after_one_agent_step(
timestep=self.timesteps, agents=self.agents, true_mitigation_placed=self.true_mitigation_placed
)
# NOTE: `sim_run` indicates if `FireSimulation.run()` was called. This helps
# indicate how to calculate the reward for the current timestep.
sim_run = self._do_one_simulation_step() # alternatively, self._step_simulation()
if sim_run and self.harness_analytics:
self.harness_analytics.update_after_one_simulation_step(
timestep=self.timesteps
)
# TODO(afennelly): Need to handle truncation properly. For now, we assume that
# the episode will never be truncated, but this isn't necessarily true.
truncated = False
# The simulation has not yet been run via `run()`
if self.sim.elapsed_steps == 0:
terminated = False
else:
terminated = not self.sim.active
# Calculate the reward for the current timestep
# TODO pass `terminated` into `get_reward` method
# FIXME: Update reward for MARL case!!
# TODO: Give each agent the "same" simple reward for now.
reward = self.reward_cls.get_reward(self.timesteps, sim_run)
# Terminate episode early if burn damage in Agent Sim is larger than final bench fire map
if self.benchmark_sim:
if self._terminate_if_greater_damage:
total_area = self.harness_analytics.sim_analytics.sim.config.area.screen_size[0] ** 2
sim_damaged_total = self.harness_analytics.sim_analytics.data.burned + self.harness_analytics.sim_analytics.data.burning
benchsim_damaged_total = total_area - self.harness_analytics.benchmark_sim_analytics.data.unburned
if sim_damaged_total > benchsim_damaged_total:
terminated = True
# TODO potentially add a static negative penalty for making the fire worse
# TODO account for below updates in the reward_cls.calculate_reward() method
# "End of episode" reward
#if terminated:
#reward += 10
if self.harness_analytics:
self.harness_analytics.update_after_one_harness_step(
sim_run, terminated, reward, timestep=self.timesteps
)
new_obs, rewards, truncateds, terminateds, infos = {}, {}, {}, {}, {}
truncs = set()
terms = set()
for agent_id, agent in self.agents.items():
new_obs[agent_id] = self.state
rewards[agent_id] = reward # FIXME !!
truncateds[agent_id] = truncated
terminateds[agent_id] = terminated
infos[agent_id] = {}
if truncated:
truncs.add(agent_id)
if terminated:
terms.add(agent_id)
terminateds["__all__"] = len(truncs) == self.num_agents
truncateds["__all__"] = len(terms) == self.num_agents
self.timesteps += 1 # increment AFTER method logic is performed (convention).
return new_obs, rewards, terminateds, truncateds, infos
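# Illustrative shape of the values returned by `step()` above, assuming two agents
# ("agent_0", "agent_1"); the "__all__" keys are what RLlib inspects to end the episode:
#   new_obs     == {"agent_0": self.state, "agent_1": self.state}
#   rewards     == {"agent_0": reward, "agent_1": reward}
#   terminateds == {"agent_0": False, "agent_1": False, "__all__": False}
#   truncateds  == {"agent_0": False, "agent_1": False, "__all__": False}
#   infos       == {"agent_0": {}, "agent_1": {}}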
# NOTE: if passing `agent` doesn't persist updates, pass `agent_id` instead.
def _do_one_agent_step(self, agent: ReactiveAgent, action: np.ndarray) -> None:
"""Move the agent and interact with the environment.
FIXME: Below details need to be changed to reflect API updates!!
Within this method, the movement and interaction that the agent takes are stored
in `self.latest_movements` and `self.latest_interactions`, respectively. If this
movement is not "none", then the agent's position on the map is updated and
stored in `self.agent_pos`.
Given some arbitrary method that defines whether a space in the simulation is
empty or not (see `_agent_pos_is_empty_space()`), the value of
`self.agent_pos_is_empty_space` is updated accordingly. If the space occupied by
the agent (`self.agent_pos`) is *empty* and the interaction is not "none", then
the agent will place a mitigation on the map and `self.mitigation_placed` is set
to True. Otherwise, `self.mitigation_placed` is set to False.
Data that we want to store after each AGENT step:
- interaction (via `_parse_action`)
- connected_mitigation (via `_update_mitigation`)
- movement (via `_parse_action`)
- moved_off_map (via `_update_agent_position`)
- near_fire (calculated within `AgentAnalytics.update`)
- burn_status (calculated within `AgentAnalytics.update`)
Additional data needed ONLY when storing all episode data:
- agent_pos (via `_update_agent_position`)
- timestep (via `self.timesteps`)
It seems like an efficient way to store the timestep data would be with a
namedtuple. I'm looking into more details now.
Args:
agent_id_num (int): _description_
action (np.ndarray): _description_
Returns:
_type_: _description_
"""
# Parse the movement and interaction from the action, and store them.
agent.latest_movement, agent.latest_interaction = self._parse_action(action)
interact = self.interactions[agent.latest_interaction] != "none"
# Ensure that mitigations are only placed on squares with `UNBURNED` status
if self._agent_pos_is_unburned(agent) and interact:
# NOTE: `self.mitigation_placed` is updated in `_update_mitigation()`.
self._update_mitigation(agent)
elif (not self._agent_pos_is_unburned(agent)) and interact:
#set true_mitigation_placed to False if agent has placed mitigation in damaged/mitigated square
#FIXME: do for each agent
self.true_mitigation_placed = False
else:
# Overwrite value from previous timestep.
agent.mitigation_placed = False
# Update agent location on map
if self.movements[agent.latest_movement] != "none":
# NOTE: `agent.current_position` is updated in `_update_agent_position()`.
self._update_agent_position(agent)
def _parse_action(self, action: np.ndarray) -> Tuple[int, int]:
"""Parse the action into movement and interaction."""
# NOTE: Assuming that all agents are homogeneous
if isinstance(self.action_space, spaces.Dict):
unique_spaces = set([type(v) for v in self.action_space.values()])
if len(unique_spaces) != 1:
raise ValueError("Only homogeneous agents are currently supported.")
act_space = unique_spaces.pop()
# Handle the MultiDiscrete case
if issubclass(act_space, spaces.MultiDiscrete):
return action[0], action[1]
# Handle the Discrete case
elif issubclass(act_space, spaces.Discrete):
return action % len(self.movements), int(action / len(self.movements))
else:
raise NotImplementedError(f"{self.action_space} is not supported.")
# FIXME: Decide what to do with the SARL action parsing; keep for now.
# Handle the MultiDiscrete case
elif isinstance(self.action_space, spaces.MultiDiscrete):
return action[0], action[1]
# Handle the Discrete case
elif isinstance(self.action_space, spaces.Discrete):
return action % len(self.movements), int(action / len(self.movements))
else:
raise NotImplementedError(f"{self.action_space} is not supported.")
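# Illustrative decoding of a flattened `Discrete` action (hypothetical sizes): with
# movements == ["none", "up", "down", "left", "right"] (len 5) and
# interactions == ["none", "fireline"] (len 2), the flat action 7 decodes to
#   movement    = 7 % 5  == 2  -> "down"
#   interaction = 7 // 5 == 1  -> "fireline"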
def _update_agent_position(self, agent: ReactiveAgent) -> None:
"""Update the agent's position on the map by performing the provided movement."""
# Index of the last valid row/column on the (square) fire map.
map_boundary = self.sim.fire_map.shape[0] - 1
# Update the agent's position based on the provided movement.
movement_str = self.movements[agent.latest_movement]
# First, check that the movement string is valid.
if movement_str not in ["up", "down", "left", "right"]:
raise ValueError(f"Invalid movement string provided: {movement_str}.")
# Then, ensure that the agent will not move off the map.
elif movement_str == "up" and not agent.row == 0:
agent.row -= 1
elif movement_str == "down" and not agent.row == map_boundary:
agent.row += 1
elif movement_str == "left" and not agent.col == 0:
agent.col -= 1
elif movement_str == "right" and not agent.col == map_boundary:
agent.col += 1
# Movement invalid from current pos, so the agent movement will be ignored.
# Depending on `self.reward_cls`, the agent may receive a small penalty.
else:
# Inform caller that the agent cannot move in the provided direction.
logger.debug(f"Agent `sim_id`={agent.sim_id}")
logger.debug(
f"Agent can't move {movement_str} from row={agent.row}, col={agent.col}."
)
logger.debug("Setting `agent.moved_off_map = True` for agent...")
agent.moved_off_map = True
# Update the Simulation with new agent position (s).
point = [agent.col, agent.row, agent.sim_id]
self.sim.update_agent_positions([point])
def _agent_pos_is_unburned(self, agent: ReactiveAgent) -> bool:
"""Returns true if the space occupied by the agent has `BurnStatus.UNBURNED`."""
return self.sim.fire_map[agent.row, agent.col] == BurnStatus.UNBURNED
def _update_mitigation(self, agent: ReactiveAgent) -> None:
"""Interact with the environment by performing the provided interaction."""
sim_interaction = self.harness_to_sim[agent.latest_interaction]
mitigation_update = (agent.col, agent.row, sim_interaction)
self.sim.update_mitigation([mitigation_update])
agent.mitigation_placed = True
# Store indicator that a true mitigation was placed, which will be set back to False in self._do_one_agent_step if agent was in an already damaged/mitigated square
self.true_mitigation_placed = True
def _do_one_simulation_step(self) -> bool:
"""Check if the simulation should be run, and then run it if necessary."""
run_sim = self.timesteps % self.agent_speed == 0
# The simulation WILL NOT be run every step, unless `self.agent_speed` == 1.
if run_sim:
self._run_simulation()
# Prepare the observation that is returned in the `self.step()` method.
self._update_state()
return run_sim
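# Illustrative cadence, assuming `agent_speed == 4`: the simulation advances only on
# timesteps 0, 4, 8, ... while the agents act on every timestep in between, i.e.
#   run_sim = timestep % 4 == 0  ->  True, False, False, False, True, ...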
def _run_simulation(self):
"""Run the simulation (s) for one timestep."""
self.sim.run(1)
def _run_benchmark(self):
"""Runs the entire benchmark sim and stores the data needed for the rewards and bench fire maps within each episode"""
#use timesteps_copy to track the matching timestep that each benchsim fire map will match with the sim fire map
timesteps_copy = 0
#if the benchmark simulation has not been updated yet
if self.benchmark_sim.elapsed_steps == 0:
self.benchmark_sim.run(1)
#update the benchsim metrics at this timesteps_copy in the harness analytics
if self.harness_analytics:
self.harness_analytics.update_bench_after_one_simulation_step(
timestep=timesteps_copy
)
#update timesteps_copy to next time the simulation with the agent will update
timesteps_copy = timesteps_copy + self.agent_speed
#store the bench fire map at the sim step
self.bench_firemaps[(self.harness_analytics.benchmark_sim_analytics.num_sim_steps) - 1] = np.copy(self.benchmark_sim.fire_map)
#continue to run the benchmark simulation and update the benchsim data/metrics after each sim step
while self.benchmark_sim.active:
self.benchmark_sim.run(1)
#update the benchsim metrics at this timesteps_copy in the harness analytics
if self.harness_analytics:
self.harness_analytics.update_bench_after_one_simulation_step(
timestep=timesteps_copy
)
#update timesteps_copy to next time the simulation with the agent will update
timesteps_copy = timesteps_copy + self.agent_speed
#update the size of self.bench_firemaps if this benchmark simulation has lasted longer than any previous benchmark simulations
if ((self.harness_analytics.benchmark_sim_analytics.num_sim_steps) - 1) > (self.max_bench_length - 1):
#append the bench fire map to the self.bench_firemaps
self.bench_firemaps.append(np.copy(self.benchmark_sim.fire_map))
#update the max length of the benchsim when defining future lists for self.bench_firemaps
self.max_bench_length = self.max_bench_length + 1
#else store the bench fire map at the sim step
else:
self.bench_firemaps[(self.harness_analytics.benchmark_sim_analytics.num_sim_steps) - 1] = np.copy(self.benchmark_sim.fire_map)
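# Illustrative bookkeeping for the loop above, assuming `agent_speed == 4` and that
# `num_sim_steps` counts calls to `benchmark_sim.run(1)`: after the k-th call,
# `num_sim_steps == k`, the fire map is stored at `self.bench_firemaps[k - 1]`, and its
# analytics are recorded under timestep (k - 1) * 4.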
def _update_state(self):
"""Modify environment's state to contain updates from the current timestep."""
# Copy the fire map from the simulation so we don't overwrite it.
fire_map = np.copy(self.sim.fire_map)
# Update the fire map with the numeric identifier for the agent.
for agent in self.agents.values():
fire_map[agent.row, agent.col] = agent.sim_id
# Modify the state to contain the updated fire map
self.state[..., self.attributes.index("fire_map")] = fire_map
#Modify the state to contain the bench fire map at that sim step
if "bench_fire_map" in self.attributes:
bench_fire_map_idx = self.attributes.index("bench_fire_map")
#if the simulation has lasted longer than the benchmark sim, use the final state of the benchsim fire map
if (self.harness_analytics.benchmark_sim_analytics.num_sim_steps < self.harness_analytics.sim_analytics.num_sim_steps):
self.state[..., (bench_fire_map_idx)] = self.bench_firemaps[(self.harness_analytics.benchmark_sim_analytics.num_sim_steps) - 1]
#else get the benchmark sim fire map from the same sim step as the simulation fire map
else:
self.state[..., (bench_fire_map_idx)] = self.bench_firemaps[(self.harness_analytics.sim_analytics.num_sim_steps) - 1]
#Modify the state to contain the final state of bench fire map
if "bench_fire_map_final" in self.attributes:
bench_fire_map_final_idx = self.attributes.index("bench_fire_map_final")
self.state[..., (bench_fire_map_final_idx)] = self.bench_firemaps[(self.harness_analytics.benchmark_sim_analytics.num_sim_steps) - 1]
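# Illustrative state layout: `self.state` is stacked channel-last in `reset()`, so with
# attributes == ["fire_map", "bench_fire_map"] (hypothetical) its shape is (X, X, 2)
# with X == sim.config.area.screen_size[0], and the agent-sim fire map lives in
# `self.state[..., 0]`.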
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[Dict[Any, Any]] = None,
) -> Tuple[Dict[Any, np.ndarray], Dict[Any, Dict[Any, Any]]]:
# log.info("Resetting environment")
# Use the following line to seed `self.np_random`
super().reset(seed=seed)
# Reset the `Simulation` to initial conditions. In particular, this resets the
# `fire_map`, `terrain`, `fire_manager`, and all mitigations.
logger.debug("Resetting `self.sim`...")
self.sim.reset()
bench_exists = False
if self.benchmark_sim:
logger.debug("Resetting `self.benchmark_sim`...")
# set the benchmark seeds to match the sim seeds
self.benchmark_sim.set_seeds(self.sim.get_seeds())
# reset benchmark simulation
self.benchmark_sim.reset()
bench_exists = True
# Reset the agents contained within the `FireSimulation`.
logger.debug("Resetting `self.agents`...")
for agent_id, agent in self.agents.items():
self.agents[agent_id].reset()
# Reset `ReactiveHarnessAnalytics` to initial conditions, if it exists.
if self.harness_analytics:
logger.debug("Resetting `self.harness_analytics`...")
self.harness_analytics.reset(benchmark_exists=bench_exists)
# Get the initial state of the `FireSimulation`, after it has been reset (above).
sim_observations = super()._select_from_dict(
self.sim.get_attribute_data(), self.sim_attributes
)
nonsim_observations = super()._select_from_dict(
self.get_nonsim_attribute_data(), self.nonsim_attributes
)
if len(nonsim_observations) != len(self.nonsim_attributes):
raise AssertionError(
f"Data for {len(nonsim_observations)} nonsim attributes were given but "
f"there are {len(self.nonsim_attributes)} nonsim attributes."
)
logger.debug(f"Normalizing obs for attributes: {self.normalized_attributes}")
observations = super()._normalize_obs({**sim_observations, **nonsim_observations})
obs = [observations[attribute] for attribute in self.attributes]
self.state = np.stack(obs, axis=-1).astype(np.float32)
# Update the `FireSimulation` with the (new) initial agent positions.
# NOTE: This is slightly redundant, since we can build the list of points within
# `_create_fire_map()`. For now, it's okay to iterate over `self.agents` twice.
points = []
for agent in self.agents.values():
points.append([agent.col, agent.row, agent.sim_id])
logger.debug(f"Updating `self.sim` with (new) initial agent positions...")
self.sim.update_agent_positions(points)
self.timesteps = 0
self._log_env_reset()
# FIXME: Will need to update creation of `marl_obs` to handle POMDP.
marl_obs = {ag_id: self.state for ag_id in self._agent_ids}
infos = {ag_id: {} for ag_id in self._agent_ids}
# If the agent(s) places an effective mitigation (not placed in already damaged/mitigated square), this is set to True.
# FIXME Have this tracked across all of the agents
self.true_mitigation_placed: bool = False
#Run the new benchsim to obtain the benchsim data used to generate the rewards and policy
if self.benchmark_sim:
#run benchmark sim to generate the benchmark sim firemaps and metrics for this episode
self._run_benchmark()
return marl_obs, infos
def get_nonsim_attribute_bounds(self) -> OrderedDict[str, Dict[str, int]]: # noqa
nonsim_min_maxes = ordered_dict()
# The values in "fire_map" are:
# - 0: BurnStatus.UNBURNED
# - 1: BurnStatus.BURNING
# - 2: BurnStatus.BURNED
# - 3: BurnStatus.FIRELINE (if "fireline" in self.interactions)
# - 4: BurnStatus.SCRATCHLINE (if "scratchline" in self.interactions)
# - 5: BurnStatus.WETLINE (if "wetline" in self.interactions)
# - X: self._min_sim_agent_id + self.num_agents (value is set in RLHarness.__init__)
nonsim_min_maxes["fire_map"] = {
"min": 0,
"max": max(self._sim_agent_ids),
}
nonsim_min_maxes["bench_fire_map"] = {
"min": 0,
"max": max(self._sim_agent_ids),
}
nonsim_min_maxes["bench_fire_map_final"] = {
"min": 0,
"max": max(self._sim_agent_ids),
}
return nonsim_min_maxes
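# These bounds feed the base harness normalization (`RLHarness._normalize_obs`), which
# maps each attribute listed in `normalized_attributes` to [0, 1] via
#   normalized = (data - min) / (max - min)
# e.g. with min == 0 and max(self._sim_agent_ids) == 5 (hypothetical), a square holding
# BurnStatus.BURNED (2) would map to 2 / 5 == 0.4 in the observation.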
def get_nonsim_attribute_data(self) -> OrderedDict[str, np.ndarray]: # noqa
nonsim_data = ordered_dict()
nonsim_data["fire_map"] = self._create_fire_map()
nonsim_data["bench_fire_map"] = np.zeros(
(
self.sim.config.area.screen_size[0],
self.sim.config.area.screen_size[0],
)
)
nonsim_data["bench_fire_map_final"] = np.zeros(
(
self.sim.config.area.screen_size[0],
self.sim.config.area.screen_size[0],
)
)
return nonsim_data
def render(self): # noqa
self.sim.rendering = True
# TODO: Finish code to allow manually specifying agent positions.
# def _check_start_pos(self, start_pos: Tuple[int, int]) -> bool:
# # Check that value is in the correct range
# if (
# start_pos[0] < 0
# or start_pos[0] >= self.sim.config.area.screen_size[0]
# or start_pos[1] < 0
# or start_pos[1] >= self.sim.config.area.screen_size[0]
# ):
# return False
# for pos in self.agent_pos:
# if np.array_equal(pos, start_pos):
# return False
# return True
# def _validate_position(self, x, y):
# """Check whether (x,y) is within the bounds of the environment."""
# return all([x >= 0, x < self.width, y >= 0, y < self.height])
# def _check_collision(self, pos1, pos2):
# """Check whether two positions overlap."""
# return pos1[0] == pos2[0] and pos1[1] == pos2[1]
# def _create_agents(self, method='random', pos_list=None):
# """Spawn agents according to the given method and position list."""
# # Initialize empty lists for holding agent objects and positions
# self.agents = []
# self.agent_positions = {}
# if method == 'manual':
# # Validate and assign positions from the input list
# assert len(pos_list) == len(self.agent_ids), \
# f"Number of positions ({len(pos_list)}) does not match number of agents ({len(self.agent_ids)})."
# for i, pos in enumerate(pos_list):
# assert len(pos) == 3, f"Position {i} has invalid length ({len(pos)}, expected 3)"
# agent_id, x, y = pos
# assert agent_id in self.agent_ids, f"Agent ID '{agent_id}' is not recognized."
# assert self._validate_position(x, y), f"Position {pos} is out of bounds."
# for j in range(i+1, len(pos_list)):
# assert not self._check_collision(pos, pos_list[j]), f"Position collision detected between {pos} and {pos_list[j]}."
# self.agents.append(ReactiveAgent(agent_id))
# self.agent_positions[agent_id] = (x, y)
# if method == "manual":
# if len(pos_list) < self.num_agents:
# # Pad with default positions
# num_missing = self.num_agents - len(pos_list)
# logger.warning(
# "%d manual agent position(s) provided; padding with %d defaults.",
# len(pos_list),
# num_missing,
# )
# pos_list += [(f"default{i}", 0, 0) for i in range(num_missing)]
# elif len(pos_list) > self.num_agents:
# # Truncate the list
# num_extra = len(pos_list) - self.num_agents
# logger.warning(
# "%d manual agent position(s) provided; ignoring %d extra.",
# len(pos_list),
# num_extra,
# )
# pos_list = pos_list[: self.num_agents]
def _create_fire_map(self) -> np.ndarray:
"""Prepare the inital copy of `self.sim.fire_map`.
Creates an ndarray of entirely `BurnStatus.UNBURNED`, except for:
- The initial fire postion, which is set to `BurnStatus.BURNING`.
- Each respective agent position is set to the agent's `sim_id`.
"""
fire_map = np.full(self.sim.fire_map.shape, BurnStatus.UNBURNED)
# TODO: Potential place to update the initial fire pos to a new value?
x, y = self.sim.config.fire.fire_initial_position
logger.debug(f"Placing initial fire position at row={y}, col={x}.")
fire_map[y, x] = BurnStatus.BURNING # array should be indexed via (row, col)
for agent in self.agents.values():
# Enforce resetting `self.agents` before calling `_create_fire_map()`.
if agent.initial_position != agent.current_position:
msg = f"The init and curr pos for agent {agent.agent_id} are different!"
raise RuntimeError(msg)
logger.debug(f"Placing {agent.sim_id} at row={agent.row}, col={agent.col}.")
fire_map[agent.row, agent.col] = agent.sim_id
return fire_map
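# Indexing convention used above (and throughout the harness): `fire_map` is indexed as
# (row, col), while simfire positions are (x, y) pairs, so a hypothetical initial fire
# position of (x=3, y=10) is written to fire_map[10, 3].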
def _create_agents(self, method: str = "random", pos_list: List = None):
"""Create the `ReactiveAgent` objects that will interact with the `FireSimulation`.
This method will create and populate the `agents` attribute.
Arguments:
method: TODO
pos_list: TODO
"""
self.agents: Dict[str, ReactiveAgent] = {}
# Use the user-provided agent positions to initialize the agents on the map.
if method == "manual":
# NOTE: The provided pos_list must be the same length as the number of agents
# TODO: Allow option to randomly generate any "missing" agent positions.
if len(pos_list) != self.num_agents:
raise ValueError(
f"Expected {self.num_agents} agent positions; got {len(pos_list)}."
)
# FIXME: We assume provided pos are valid wrt map dims and agent collisions.
# FIXME: Finish logic HERE to create `self.agents` dict
raise NotImplementedError # adding so I don't forget!
# for agent_info, sim_id in zip(pos_list, sim_agent_ids):
# agent_str, x, y = agent_info
# agent = ReactiveAgent(agent_str, sim_id, (x, y))
# self.agents[agent_str] = agent
# Generate random agent locations for the start of the episode.
elif method == "random":
# Create a boolean mask of valid positions (i.e., inside the boundaries).
mask = np.ones(self.sim.fire_map.shape, dtype=bool)
# Agent (s) can only be spawned on an unburning square
# NOTE: Any other "prohibited" agent start locations can be specified here.
mask[np.where(self.sim.fire_map != BurnStatus.UNBURNED)] = False
# Randomly select unique positions from the valid ones.
idx = np.random.choice(range(mask.sum()), size=self.num_agents, replace=False)
flat_idx = np.argwhere(mask.flatten())[idx].flatten()
agent_locs = np.vstack(np.unravel_index(flat_idx, mask.shape)).T
# Populate the `self.agents` dict with `ReactiveAgent` object (s).
agent_ids = sorted(self._agent_ids, key=lambda x: int(x.split("_")[-1]))
sim_ids = self._sim_agent_ids
for agent_str, sim_id, loc in zip(agent_ids, sim_ids, agent_locs):
agent = ReactiveAgent(agent_str, sim_id, tuple(loc))
self.agents[agent_str] = agent
# This should be caught within the init. To be safe, also raise error here.
else:
raise NotImplementedError(f"Agent spawn method {method} not implemented.")
def _configure_env_rendering(self, should_render: bool) -> None:
"""Configure the environment's `FireSimulation` to be rendered (or not).
If the simulation should be rendered, then the `headless` parameter in the
simulation's config (file) should be set to `False`, enabling the usage of pygame.
Additionally, the environment's `_should_render` attribute is set to ensure
that rendering is active when desired. This is especially important when the
number of eval episodes, specified via `evaluation.evaluation_duration`, is >1.
"""
sim_data = self.sim.config.yaml_data
sim_data["simulation"]["headless"] = not should_render
# Update simulation's config attribute.
logger.info("Updating the `self.sim.config` with new `Config` object...")
self.sim.config = Config(config_dict=sim_data)
# Reset the simulation to ensure that the new config is used.
logger.info(f"Resetting `self.sim` to configure rendering == {should_render}.")
self.sim.reset()
# Update the simulation's rendering attribute to match the provided value.
if should_render:
logger.info("Setting SDL_VIDEODRIVER environment variable to 'dummy'...")
os.environ["SDL_VIDEODRIVER"] = "dummy"
self.sim.rendering = should_render
# Indicate whether the environment's `FireSimulation` should be rendered.
self._should_render = should_render
def _increment_evaluation_iterations(self) -> None:
"""Increment the number of evaluation iterations that have been run."""
self._num_eval_iters += 1
# def _set_agent_pos_for_episode_start(self):
# """Set the agent's initial position in the map for the start of the episode."""
# for agent_id in self._agent_ids:
# valid_pos = False
# # Keep looping until we get a valid position
# while not valid_pos:
# random_pos = self.np_random.integers(
# 0, self.sim.config.area.screen_size, size=2, dtype=int
# )
# valid_pos = self._check_start_pos(random_pos)
# self.agent_pos[agent_id] = random_pos
def _log_env_init(self):
"""Log information about the environment that is being initialized."""
if self._is_eval_env:
i, j = self.worker_idx, self.vector_idx
logger.warning(f"Object {hex(id(self))}: index (i+1)*(j+1) == {(i+1)*(j+1)}")
if not self._debug_mode:
return
# TODO: What log level should we use here?
logger.info(f"Object {hex(id(self))}: worker_index: {self.worker_idx}")
logger.info(f"Object {hex(id(self))}: vector_index: {self.vector_idx}")
logger.info(f"Object {hex(id(self))}: num_workers: {self.num_workers}")
logger.info(f"Object {hex(id(self))}: is_remote: {self.is_remote}")
def _log_env_reset(self):
"""Log information about the environment that is being reset."""
if not self._debug_mode or self._episodes_debugged > self._debug_duration:
return
# TODO: What log level should we use here?
for idx, feat in enumerate(self.attributes):
low, high = self._low[..., idx].min(), self._high[..., idx].max()
obs_min = round(self.state[..., idx].min(), 2)
obs_max = round(self.state[..., idx].max(), 2)
# Log lower bound of the (obs space) and max returned obs for each attribute.
logger.info(f"{feat} LB: {low}, obs min: {obs_min}")
# Log upper (lower) bounds of the returned observations for each attribute.
logger.info(f"{feat} UB: {high}, obs max: {obs_max}")
# Increment the number of episodes that have been debugged.
self._episodes_debugged += 1
def _setup_harness_analytics(self, analytics_partial: partial) -> None:
"""Instantiates the `harness_analytics` used to monitor this `ReactiveHarness` obj.
Arguments:
analytics_partial:
A `functools.partial` object that indicates the top-level
class that will be used to monitor the `ReactiveHarness` object. The user
is expected to provide the `sim_data_partial` keyword argument, along
with a valid value.
Raises:
TypeError: If `harness_analytics_partial.keywords` does not contain a
`sim_data_partial` key with value of type `functools.partial`.
""" | self.harness_analytics: ReactiveHarnessAnalytics | 0 | 2023-12-08 19:13:31+00:00 | 12k |
racinette/querky | querky/querky.py | [
{
"identifier": "one_",
"path": "querky/result_shape.py",
"snippet": "def one_(typename: str | None, *, optional: bool = True) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> One:\n return One(query, typename, optional=optional)\n return late_binding"
},
{
"identifier": "all_",
"path": "querky/result_shape.py",
"snippet": "def all_(typename: str | None) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> All:\n return All(query, typename)\n return late_binding"
},
{
"identifier": "value_",
"path": "querky/result_shape.py",
"snippet": "def value_(annotation: str | TypeMetaData | None = None, *, optional: bool = False) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> Value:\n return Value(query, annotation, optional=optional)\n return late_binding"
},
{
"identifier": "status_",
"path": "querky/result_shape.py",
"snippet": "def status_() -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> Status:\n return Status(query)\n return late_binding"
},
{
"identifier": "column_",
"path": "querky/result_shape.py",
"snippet": "def column_(annotation: str | TypeMetaData | None = None, *, elem_optional: bool = False) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> Value:\n return Column(query, annotation, elem_optional=elem_optional)\n return late_binding"
},
{
"identifier": "One",
"path": "querky/result_shape.py",
"snippet": "class One(ResultShape):\n def __init__(self, query: Query, typename: str | None, *, optional: bool = True):\n super().__init__(query)\n\n if self.query.parent_query is None:\n if self.querky.type_factory is not None:\n self.ctor = self.querky.type_factory(self.query, typename)\n else:\n self.ctor = None\n else:\n # забираем конструктор типа из базового запроса\n parent_shape = self.query.parent_query.shape\n if not isinstance(parent_shape, (All, One)):\n raise ValueError(\"Invalid shape, must be a row shape\")\n\n self.ctor = parent_shape.ctor\n # копируем название типа из отеческого запроса\n typename = parent_shape.ctor.typename\n\n if self.ctor.shape is None:\n self.ctor.shape = self\n\n self.optional = optional\n if self.ctor is not None:\n type_meta = TypeMetaData(typename)\n else:\n type_meta = self.query.contract.get_default_record_type_metadata()\n self.return_type = TypeKnowledge(\n metadata=type_meta,\n is_optional=self.optional,\n is_array=False,\n elem_is_optional=None\n )\n self.annotate()\n\n def annotate(self):\n self.query.annotation_generator.annotate(self.return_type, context='result_type')\n\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n for attribute in self.query.query_signature.attributes:\n try:\n if attr_hint := self.query.attr_hints.get(attribute.name, None):\n attribute.consume_attr(attr_hint)\n self.query.annotation_generator.annotate(attribute.type_knowledge, 'attribute')\n except Exception as ex:\n raise QueryInitializationError(self.query, f\"attribute `{attribute.name}`\") from ex\n if self.ctor is not None:\n if self.ctor.attributes is None:\n self.ctor.set_attributes(attrs)\n elif self.ctor.attributes != attrs:\n raise QueryInitializationError(\n self.query,\n \"Expected the same return type signature, but the attributes are not equal:\\n\"\n f\"Expected: {self.ctor.attributes}\\n\"\n f\"Got: {attrs}\"\n )\n\n def generate_type_code(self) -> typing.List[str] | None:\n if self.ctor is not None and not self.ctor.type_code_generated:\n return self.ctor.generate_type_code()\n else:\n return None\n\n def get_imports(self) -> set[str]:\n s = super().get_imports()\n if self.ctor is not None:\n return s.union(self.ctor.get_imports())\n return s\n\n async def fetch(self, conn, params):\n contract = self.query.module.querky.contract\n row = await contract.fetch_one(conn, self.query, params)\n if self.ctor.row_factory and row is not None:\n row = self.ctor.row_factory(row)\n return row\n\n def fetch_sync(self, conn, params):\n contract = self.query.module.querky.contract\n row = contract.fetch_one_sync(conn, self.query, params)\n if self.ctor.row_factory:\n row = self.ctor.row_factory(row)\n return row\n\n def get_exports(self) -> typing.Sequence[str]:\n if self.ctor is not None:\n return [self.ctor.get_exported_name()]\n else:\n return []"
},
{
"identifier": "All",
"path": "querky/result_shape.py",
"snippet": "class All(One):\n def __init__(self, query: Query, typename: str | None,):\n super().__init__(query, typename, optional=False)\n self.return_type.is_optional = False\n self.return_type.is_array = True\n self.return_type.elem_is_optional = False\n self.query.annotation_generator.annotate(self.return_type, context='result_type')\n\n def annotate(self):\n pass\n\n async def fetch(self, conn, params):\n contract = self.query.module.querky.contract\n rows = await contract.fetch_all(conn, self.query, params)\n if self.ctor.row_factory:\n rows = [\n self.ctor.row_factory(row)\n for row in rows\n ]\n return rows\n\n def fetch_sync(self, conn, params):\n contract = self.query.module.querky.contract\n rows = contract.fetch_all_sync(conn, self.query, params)\n if self.ctor.row_factory:\n rows = [\n self.ctor.row_factory(row)\n for row in rows\n ]\n return rows"
},
{
"identifier": "ResultShape",
"path": "querky/result_shape.py",
"snippet": "class ResultShape(ABC, GetImportsMixin):\n def __init__(self, query: Query) -> None:\n self.query: Query = query\n self.return_type: TypeKnowledge | None = None\n\n @property\n def querky(self):\n return self.query.querky\n\n def get_imports(self) -> set[str]:\n return self.return_type.get_imports()\n\n @abstractmethod\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n ...\n\n @abstractmethod\n def generate_type_code(self) -> typing.List[str] | None:\n ...\n\n def get_annotation(self) -> str:\n return self.return_type.typehint\n\n @abstractmethod\n async def fetch(self, conn, bound_params):\n ...\n\n @abstractmethod\n async def fetch_sync(self, conn, bound_params):\n ...\n\n @abstractmethod\n def get_exports(self) -> typing.Sequence[str]:\n ..."
},
{
"identifier": "ConnParamConfig",
"path": "querky/conn_param_config.py",
"snippet": "class ConnParamConfig:\n name: str\n\n def create_parameter(\n self,\n query: Query,\n parameters: typing.Sequence[Parameter],\n type_metadata: TypeMetaData\n ) -> tuple[Parameter, TypeKnowledge, int]:\n ..."
},
{
"identifier": "First",
"path": "querky/conn_param_config.py",
"snippet": "class First(ConnParamConfig):\n positional: bool = False\n\n def create_parameter(\n self,\n _query: Query,\n parameters: typing.Sequence[Parameter],\n type_metadata: TypeMetaData\n ) -> tuple[Parameter, TypeKnowledge, int]:\n if self.positional:\n kind = Parameter.POSITIONAL_ONLY\n else:\n if parameters and parameters[0].kind == Parameter.POSITIONAL_ONLY:\n kind = Parameter.POSITIONAL_ONLY\n else:\n kind = Parameter.POSITIONAL_OR_KEYWORD\n\n p = Parameter(self.name, kind)\n return p, TypeKnowledge(type_metadata, False, False, False), 0"
},
{
"identifier": "AnnotationGenerator",
"path": "querky/annotation_generator.py",
"snippet": "class AnnotationGenerator(ABC):\n @abstractmethod\n def generate(self, knowledge: TypeKnowledge, context: str) -> str:\n ...\n\n def annotate(self, knowledge: TypeKnowledge, context: str, force: bool = False) -> None:\n if knowledge.typehint is None or force:\n knowledge.typehint = self.generate(knowledge, context)"
},
{
"identifier": "TypeConstructor",
"path": "querky/type_constructor.py",
"snippet": "class TypeConstructor(typing.Generic[T], GetImportsMixin):\n def __init__(\n self,\n query: Query,\n typename: str,\n required_imports: typing.Set[str],\n row_factory: typing.Callable[[typing.Any], T] | None\n ):\n self.query = query\n self.type_code_generated = False\n self.typename = typename\n self.required_imports = required_imports\n self.shape: typing.Optional[ResultShape] = None\n self.attributes: typing.Optional[typing.Tuple[ResultAttribute, ...]] = None\n self.row_factory = row_factory\n self.type_code_generated: bool = False\n self.attributes_collected: bool = False\n\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n self.attributes = attrs\n\n def get_imports(self) -> set[str]:\n s = set(self.required_imports)\n for attr in self.attributes:\n s.update(attr.get_imports())\n return s\n\n def get_exported_name(self) -> str:\n return self.typename\n\n def indent(self, i: int) -> str:\n return self.shape.query.querky.get_indent(i)"
},
{
"identifier": "ModuleConstructor",
"path": "querky/module_constructor.py",
"snippet": "class ModuleConstructor:\n def __init__(\n self,\n querky: Querky,\n module: types.ModuleType,\n fullpath: str,\n module_path: str,\n filedir: str\n ):\n self.module = module\n self.querky = querky\n self.imports = set(querky.imports)\n self.exports = set()\n self.fullpath = fullpath\n self.module_path = module_path\n self.filedir = filedir\n\n self.queries_list = []\n\n def indent(self, i: int) -> str:\n return self.querky.get_indent(i)\n\n def _post_init(self):\n # Generate module code\n code = []\n for query in self.queries_list:\n query_code = query.generate_code()\n if not query_code:\n continue\n code.append('')\n code.append('')\n code.extend(query_code)\n code.append('')\n\n # Collect imports\n for query in self.queries_list:\n self.imports.update(query.get_imports())\n\n # Collect exports\n for query in self.queries_list:\n self.exports.update(query.get_exports())\n\n # Create import lines\n imports = [\n *getattr(self.module, '__imports__', []),\n *self.imports\n ]\n\n for query in self.queries_list:\n imports.append(\n f\"from {self.module.__name__} import {query.query.__name__} as {query.local_name}\"\n )\n\n # Imports + Code\n code = [\n *imports,\n *code,\n ]\n\n # If there are exports, create them at the end of the file (__all__)\n if self.exports:\n code.append('')\n code.append('__all__ = [')\n for export in self.exports:\n code.append(f'{self.indent(1)}\"{export}\",')\n code.append(']')\n code.append('')\n\n self.querky.sign_file_contents(code)\n\n code = '\\n'.join(code)\n\n # checking, if file already exists\n file_exists = path.isfile(self.fullpath)\n if file_exists:\n # check, if we can overwrite the contents\n self.querky.check_file_is_mine(self.fullpath)\n\n if self.querky.subdir:\n os.makedirs(self.filedir, exist_ok=True)\n\n with open(self.fullpath, encoding='utf-8', mode='w') as f:\n f.write(code)\n\n async def generate_module(self, db):\n for query in self.queries_list:\n await query.fetch_types(db)\n self._post_init()\n\n def generate_module_sync(self, db):\n for query in self.queries_list:\n query.fetch_types_sync(db)\n self._post_init()"
},
{
"identifier": "TypeMetaData",
"path": "querky/base_types.py",
"snippet": "class TypeMetaData(GetImportsMixin):\n counterpart: str\n required_imports: set[str] | None = None\n\n def get_imports(self) -> set[str]:\n if self.required_imports is None:\n return set()\n return set(self.required_imports)\n\n @classmethod\n def from_type(cls, t: typing.Type) -> TypeMetaData:\n type_name = t.__name__\n module_path = t.__module__\n return TypeMetaData(\n counterpart=type_name,\n required_imports={f\"from {module_path} import {type_name}\"}\n )"
},
{
"identifier": "Query",
"path": "querky/query.py",
"snippet": "class Query(typing.Generic[RS]):\n defaults: dict[str, typing.Any]\n\n def __init__(\n self,\n func: typing.Callable,\n shape: typing.Callable[[Query], RS],\n module: ModuleConstructor,\n conn_param_config: ConnParamConfig,\n explicit_name: typing.Optional[str],\n parent_query: typing.Optional[Query[One | All]],\n kwargs: typing.Optional[typing.Dict[str, typing.Any]]\n ) -> None:\n self.parent_query: Query[One | All] | None = parent_query\n\n self.imports = set()\n self.kwargs = kwargs or dict()\n self.query = func\n self.name = explicit_name or func.__name__\n self.conn_param_config = conn_param_config\n\n self.sig = inspect.signature(func)\n self.template_signature = None\n\n self.module = module\n self.module.queries_list.append(self)\n\n self.param_mapper: ParamMapper = self.contract.create_param_mapper(self)\n self.sql = self.param_mapper.parametrize_query()\n self.default = DictGetAttr(self.param_mapper.defaults)\n # side effect: attr gets populated, so we flush it\n self.attr_hints: dict[str, Attr] = {\n a.name: a\n for a in _attr_.__getattrs__()\n }\n\n module_filename = self.module.module.__file__\n common = path.commonprefix([module.querky.basedir, module_filename])\n self.relative_path = module_filename[len(common):]\n self.unique_name = f\"{self.relative_path}:{self.query.__name__}\"\n self.local_name = self.get_local_name()\n\n self.query_signature: QuerySignature | None = None\n self.conn_type_knowledge: TypeKnowledge | None = None\n\n self.bound_type = None\n self.shape: ResultShape = shape(self)\n\n if not isinstance(self.shape, (One, All)) and parent_query:\n raise ValueError(\"Only One and All queries can have a parent query.\")\n if parent_query and not isinstance(parent_query.shape, (One, All)):\n raise ValueError(\"Parent query must be of either One or All shape.\")\n\n logger.debug(\n \"Query: %s\\nSQL: %s\",\n self.unique_name, self.sql\n )\n\n @property\n def annotation_generator(self):\n return self.querky.annotation_generator\n\n @property\n def contract(self):\n return self.module.querky.contract\n\n @property\n def querky(self):\n return self.module.querky\n\n def bind_type(self, t) -> None:\n self.bound_type = t\n\n async def execute(self, conn, *args, **kwargs):\n params = self.param_mapper.map_params(*args, **kwargs)\n return await self.shape.fetch(conn, params)\n\n def execute_sync(self, conn, *args, **kwargs):\n params = self.param_mapper.map_params(*args, **kwargs)\n return self.shape.fetch_sync(conn, params)\n\n def _after_types_fetched(self):\n # типы параметров передадим мапперу\n self.param_mapper.assign_type_knowledge(self.query_signature.parameters)\n # а типы аттрибутов - результату\n self.shape.set_attributes(self.query_signature.attributes)\n\n async def fetch_types(self, db) -> None:\n try:\n self.query_signature = await self.contract.get_query_signature(db, self)\n self._after_types_fetched()\n except QueryInitializationError:\n raise\n except Exception as ex:\n raise QueryInitializationError(self, additional_hint=\"fetching types\") from ex\n\n def fetch_types_sync(self, db) -> None:\n try:\n self.query_signature = self.contract.get_query_signature_sync(db, self)\n self._after_types_fetched()\n except QueryInitializationError:\n raise\n except Exception as ex:\n raise QueryInitializationError(self, additional_hint=\"fetching types\") from ex\n\n def string_signature(self):\n return f\"{self.relative_path}: {self.query.__name__}{self.sig}\"\n\n def get_local_name(self) -> str:\n return 
f\"_q{self.module.queries_list.index(self)}\"\n\n def _generate_proxy_function_code(self):\n try:\n new_params = []\n\n for param in self.param_mapper.params:\n name = param.name\n\n old_param = param.param\n\n if old_param.default is not inspect._empty:\n default = ReprHelper(f\"{self.local_name}.default.{name}\")\n else:\n default = inspect._empty\n\n typehint = param.type_knowledge.typehint\n if typehint is None:\n raise QueryInitializationError(\n self,\n f\"{param.name}: parameter type annotation is missing\"\n )\n\n new_params.append(\n Parameter(\n name,\n old_param.kind,\n annotation=ReprHelper(typehint),\n default=default\n )\n )\n\n conn_param, type_knowledge, index = self.conn_param_config.create_parameter(\n self,\n new_params,\n self.contract.get_connection_type_metadata()\n )\n self.conn_type_knowledge = type_knowledge\n self.annotation_generator.annotate(type_knowledge, context='conn_param')\n if type_knowledge.typehint is not None:\n conn_param = conn_param.replace(annotation=ReprHelper(type_knowledge.typehint))\n\n new_params.insert(index, conn_param)\n\n return_annotation = self.shape.get_annotation()\n if return_annotation is None:\n raise QueryInitializationError(\n self,\n f\"return type annotation is missing\"\n )\n\n return_annotation_repr = ReprHelper(return_annotation)\n\n self.new_signature = self.sig.replace(\n parameters=new_params,\n return_annotation=return_annotation_repr\n )\n\n is_async = self.contract.is_async()\n async_ = 'async ' if is_async else ''\n await_ = 'await ' if is_async else ''\n _sync = \"_sync\" if not is_async else ''\n\n conn_str = self.conn_param_config.name\n\n arg_remap_string = self.param_mapper.mirror_arguments()\n arg_string = f\"{conn_str}, {arg_remap_string}\"\n\n try:\n code = [\n f\"{async_}def {self.name}{self.new_signature}:\",\n f\"{self.querky.get_indent(1)}return {await_}{self.local_name}.execute{_sync}({arg_string})\"\n ]\n except Exception as _ex:\n # for debugging\n raise\n\n logger.debug('[OK] - %s', self.unique_name)\n return code\n except Exception as ex:\n logger.exception('[BAD] - %s', self.unique_name)\n raise ex\n\n def get_type_bind_ident(self) -> typing.Optional[str]:\n if isinstance(self.shape, (Value, Column, Status)):\n return None\n elif isinstance(self.shape, (One, All)):\n if self.shape.ctor:\n return self.shape.ctor.typename\n return None\n\n def get_exports(self):\n exports = {\n self.name,\n *self.shape.get_exports()\n }\n if parent := self.parent_query:\n parent_shape = parent.shape\n if not isinstance(parent_shape, (One, All)):\n raise ValueError(\"parent shape must be ether One or All\")\n shape: typing.Union[One, All] = parent_shape\n exports.add(shape.ctor.typename)\n return exports\n\n def get_imports(self):\n imports = set(self.imports)\n for elem in self.param_mapper.params:\n imports.update(elem.get_imports())\n\n if self.conn_type_knowledge is not None:\n imports.update(self.conn_type_knowledge.get_imports())\n\n if (parent := self.parent_query) and parent.module is not self.module:\n parent_shape = parent.shape\n if isinstance(parent_shape, (One, All)):\n imports.add(\n f\"from {parent.module.module_path} import {parent_shape.ctor.typename}\"\n )\n else:\n raise ValueError(\"you can only use return types from 'one' and 'many' queries\")\n else:\n # we're gonna create the type from scratch, so we need the imports\n imports.update(self.shape.get_imports())\n\n return imports\n\n def generate_code(self):\n lines = []\n # data type code\n if type_code := self.shape.generate_type_code():\n if cb := 
self.module.querky.on_before_type_code_emit:\n type_code = cb(type_code, self)\n lines.extend(type_code)\n lines.append('')\n lines.append('')\n\n # proxy function code, which simply accepts annotated arguments and proxies the call to this query\n func_code = self._generate_proxy_function_code()\n if cb := self.module.querky.on_before_func_code_emit:\n func_code = cb(func_code, self)\n lines.extend(func_code)\n\n if bound_type_ident := self.get_type_bind_ident():\n # binding return type to the underlying query\n lines.append('')\n lines.append(f'{self.local_name}.bind_type({bound_type_ident})')\n\n return lines\n\n def __call__(self, conn, *args, **kwargs):\n if self.contract.is_async():\n return self.execute(conn, *args, **kwargs)\n else:\n return self.execute_sync(conn, *args, **kwargs)"
},
{
"identifier": "Contract",
"path": "querky/contract.py",
"snippet": "class Contract(ABC):\n @abstractmethod\n def create_param_mapper(self, query: Query) -> ParamMapper:\n ...\n\n @abstractmethod\n def get_default_record_type_metadata(self) -> TypeMetaData:\n ...\n\n @abstractmethod\n def get_connection_type_metadata(self) -> TypeMetaData:\n ...\n\n @abstractmethod\n async def get_query_signature(self, db, query: Query) -> QuerySignature:\n ...\n\n @abstractmethod\n def get_query_signature_sync(self, db, query: Query) -> QuerySignature:\n ...\n\n @abstractmethod\n def is_async(self) -> bool:\n ...\n\n @abstractmethod\n async def fetch_value(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_one(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_all(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_column(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_status(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def raw_execute(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchval(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchone(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetch(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def fetch_value_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_one_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_all_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_column_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_status_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def raw_execute_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchval_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def raw_fetchone_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def raw_fetch_sync(self, conn, sql: str, params):\n ..."
}
] | import importlib
import types
import typing
import inspect
import os
import logging
from types import ModuleType
from os import path
from querky.result_shape import one_, all_, value_, status_, column_, One, All, ResultShape
from querky.conn_param_config import ConnParamConfig, First
from querky.annotation_generator import AnnotationGenerator
from querky.type_constructor import TypeConstructor
from querky.module_constructor import ModuleConstructor
from querky.base_types import TypeMetaData
from querky.query import Query
from querky.contract import Contract | 7,304 | self.imports = imports or set()
self.indent = indent
self.annotation_generator = annotation_generator
self.module_ctors: dict[types.ModuleType, ModuleConstructor] = dict()
self.type_factory = type_factory
if conn_param_config is None:
conn_param_config = First(name='__conn', positional=True)
self.conn_param_config = conn_param_config
self.contract = contract
self.subdir = subdir
if self.subdir and not str.isidentifier(self.subdir):
raise ValueError("subdir must be a valid python identifier")
self.file_signature = "# ~ AUTOGENERATED BY QUERKY ~ #"
def get_indent(self, i: int):
return self.indent * i
def create_query(
self,
fn: typing.Callable[[...], str],
shape: typing.Callable[[Query], ResultShape],
conn_param_config: ConnParamConfig | None,
explicit_name: str | None,
parent_query: typing.Optional[Query],
kwargs: typing.Optional[typing.Dict[str, typing.Any]]
) -> Query:
module = inspect.getmodule(fn)
if module in self.module_ctors:
module_ctor = self.module_ctors[module]
else:
filename = self.generate_filename(module)
if not str.isidentifier(filename):
raise ValueError(f"Generated a filename which is not a valid python identifier: {filename}")
filedir = path.dirname(module.__file__)
new_module_name = module.__name__.rsplit('.', maxsplit=1)[0]
if self.subdir:
filedir = path.join(filedir, self.subdir)
new_module_name = f"{new_module_name}.{self.subdir}"
fullpath = path.join(filedir, f'{filename}.py')
new_module_name = f"{new_module_name}.{filename}"
module_ctor = ModuleConstructor(self, module, fullpath, new_module_name, filedir)
self.module_ctors[module] = module_ctor
return self.query_class(
fn,
shape,
module_ctor,
self.conn_param_config or conn_param_config,
explicit_name,
parent_query,
kwargs
)
def query(
self,
arg: str | TypeMetaData | Query | typing.Callable[[...], str] | None = None,
*,
shape: ShapeStringRepr = 'status',
optional: bool | None = None,
**kwargs
) -> QueryDef | Query:
def wrapper(fn: typing.Callable[[...], str]) -> Query:
nonlocal optional
if shape in ['many', 'one']:
if isinstance(arg, TypeMetaData):
raise ValueError(
"TypeMetaData is not supported for `many` or `one` constructors. "
"Use it only for `one` and `column` constructors."
)
if not isinstance(arg, Query):
if arg is None:
# if we don't have a name provided for us, we're gonna create it out of the function name
type_name = to_camel_case(fn.__name__)
else:
type_name = arg
if not type_name.isidentifier():
raise ValueError(f"Name type should be a valid python identifier. You provided: {type_name}")
else:
type_name = None
type_name: str | None
if shape == 'many':
if optional is not None:
raise TypeError(
'ALL constructor does not accept `optional` flag -- '
'at least an empty set will always be returned'
)
created_shape = all_(type_name)
else:
if optional is None:
optional = True
created_shape = one_(type_name, optional=optional)
elif shape in ['value', 'column']:
if arg is None:
annotation = None
else:
annotation = arg
if shape == 'value':
if optional is None:
optional = True
created_shape = value_(annotation, optional=optional)
else:
if optional is None:
optional = False
| from __future__ import annotations
logger = logging.getLogger("querky")
def to_camel_case(snake_str):
return "".join(x.capitalize() for x in snake_str.lower().split("_"))
ShapeStringRepr = typing.Literal["one", "many", "column", "value", "status"]
QueryDef = typing.Callable[[typing.Callable[[...], str]], Query]
class Querky:
def __init__(
self,
basedir: str | None = None,
annotation_generator: AnnotationGenerator | None = None,
contract: Contract | None = None,
conn_param_config: ConnParamConfig | None = None,
type_factory: typing.Callable[[Query, str], TypeConstructor] | None = None,
subdir: str | None = "queries",
on_before_func_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None,
on_before_type_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None,
imports: typing.Optional[typing.Set[str]] = None,
indent: str = ' ',
query_class: typing.Type[Query] = Query
):
self.basedir = basedir
self.on_before_func_code_emit = on_before_func_code_emit
self.on_before_type_code_emit = on_before_type_code_emit
self.query_class = query_class
self.imports = imports or set()
self.indent = indent
self.annotation_generator = annotation_generator
self.module_ctors: dict[types.ModuleType, ModuleConstructor] = dict()
self.type_factory = type_factory
if conn_param_config is None:
conn_param_config = First(name='__conn', positional=True)
self.conn_param_config = conn_param_config
self.contract = contract
self.subdir = subdir
if self.subdir and not str.isidentifier(self.subdir):
raise ValueError("subdir must be a valid python identifier")
self.file_signature = "# ~ AUTOGENERATED BY QUERKY ~ #"
def get_indent(self, i: int):
return self.indent * i
def create_query(
self,
fn: typing.Callable[[...], str],
shape: typing.Callable[[Query], ResultShape],
conn_param_config: ConnParamConfig | None,
explicit_name: str | None,
parent_query: typing.Optional[Query],
kwargs: typing.Optional[typing.Dict[str, typing.Any]]
) -> Query:
module = inspect.getmodule(fn)
if module in self.module_ctors:
module_ctor = self.module_ctors[module]
else:
filename = self.generate_filename(module)
if not str.isidentifier(filename):
raise ValueError(f"Generated a filename which is not a valid python identifier: {filename}")
filedir = path.dirname(module.__file__)
new_module_name = module.__name__.rsplit('.', maxsplit=1)[0]
if self.subdir:
filedir = path.join(filedir, self.subdir)
new_module_name = f"{new_module_name}.{self.subdir}"
fullpath = path.join(filedir, f'{filename}.py')
new_module_name = f"{new_module_name}.{filename}"
module_ctor = ModuleConstructor(self, module, fullpath, new_module_name, filedir)
self.module_ctors[module] = module_ctor
return self.query_class(
fn,
shape,
module_ctor,
self.conn_param_config or conn_param_config,
explicit_name,
parent_query,
kwargs
)
def query(
self,
arg: str | TypeMetaData | Query | typing.Callable[[...], str] | None = None,
*,
shape: ShapeStringRepr = 'status',
optional: bool | None = None,
**kwargs
) -> QueryDef | Query:
def wrapper(fn: typing.Callable[[...], str]) -> Query:
nonlocal optional
if shape in ['many', 'one']:
if isinstance(arg, TypeMetaData):
raise ValueError(
"TypeMetaData is not supported for `many` or `one` constructors. "
"Use it only for `one` and `column` constructors."
)
if not isinstance(arg, Query):
if arg is None:
# if we don't have a name provided for us, we're gonna create it out of the function name
type_name = to_camel_case(fn.__name__)
else:
type_name = arg
if not type_name.isidentifier():
raise ValueError(f"Name type should be a valid python identifier. You provided: {type_name}")
else:
type_name = None
type_name: str | None
if shape == 'many':
if optional is not None:
raise TypeError(
'ALL constructor does not accept `optional` flag -- '
'at least an empty set will always be returned'
)
created_shape = all_(type_name)
else:
if optional is None:
optional = True
created_shape = one_(type_name, optional=optional)
elif shape in ['value', 'column']:
if arg is None:
annotation = None
else:
annotation = arg
if shape == 'value':
if optional is None:
optional = True
created_shape = value_(annotation, optional=optional)
else:
if optional is None:
optional = False | created_shape = column_(annotation, elem_optional=optional) | 4 | 2023-12-13 15:16:34+00:00 | 12k |
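Editorial illustration (not part of the dataset row above): the querky row quotes a `to_camel_case` helper and a `query` decorator that falls back to it when no explicit result-type name is supplied. Below is a minimal, self-contained sketch of that fallback, assuming only the behaviour visible in the quoted code; `derive_type_name` is a hypothetical wrapper added here purely for demonstration (querky performs the same check inline inside `Querky.query`'s wrapper closure).

# Minimal sketch of the type-name fallback, for illustration only.
def to_camel_case(snake_str: str) -> str:
    # Same logic as the helper quoted in the row above.
    return "".join(x.capitalize() for x in snake_str.lower().split("_"))

def derive_type_name(func_name: str, explicit: str | None = None) -> str:
    # Hypothetical helper; querky does this inline when no explicit name is given.
    type_name = explicit if explicit is not None else to_camel_case(func_name)
    if not type_name.isidentifier():
        raise ValueError(f"Not a valid python identifier: {type_name}")
    return type_name

if __name__ == "__main__":
    print(derive_type_name("get_user_by_id"))             # GetUserById
    print(derive_type_name("get_user_by_id", "UserRow"))  # UserRow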
javrtg/C2P | tests/test_constraints.py | [
{
"identifier": "constraints",
"path": "nonmin_pose/constraints/constraints.py",
"snippet": "def assert_smaller_idxes(param1i, param2i):\n def __init__(self, name: str, block: int, block_ids: List[int]):\n def __init__(\n self,\n params: dict,\n idx_first_el: int,\n idx_first_eq: int = 0,\n drop_eqs: Optional[List[int]] = None,\n ):\n def flatten_eqs_info(self, idx_first_eq, blocks, rows, cols, drop_eqs):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data(f0, f1):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data(f0, f1):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data_1st_ineq(f0: np.ndarray, f1: np.ndarray):\n def aggregate_data_2nd_ineq(f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\nclass Parameter:\nclass Constraint(ABC):\nclass Adjoint(Constraint):\nclass NormT(Constraint):\nclass NormQ(Constraint):\nclass NormE(Constraint):\nclass Homogenization(Constraint):\nclass CheiralityTranslationV2(Constraint):\nclass CheiralityRotation(Constraint):\nclass ManifDefLeft(Constraint):\nclass ManifDefRight(Constraint):\nclass EDefLeft(Constraint):\nclass EDefRight(Constraint):\nclass EDefLeftRight(Constraint):\nclass RightNullSpace(Constraint):\nclass LeftNullSpace(Constraint):\nclass CheiralityTranslation(Constraint):\nclass CheiralityRotationQ(Constraint):\nclass CheiralityMidpoint(Constraint):\nclass Orthogonality(Constraint):\nclass DeterminantR(Constraint):\nclass TQDefinition(Constraint):\nclass SkewTQDefinition(Constraint):\nclass ConvexHullSO3(Constraint):\n CONSTRAINT_IDX_PER_EQ: List[List[int]]\n COEFFS_PER_EQ: List[List[float]]\n CONSTRAINT_VALUES: List[float]\n EQUATION = \"adj(E) = qt^T\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"||t||^2 = 1\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"||q||^2 = 1\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"norm(E) = 2\"\n COEFFS_PER_EQ = [[1.0] * 9]\n CONSTRAINT_VALUES = [2.0]\n E = params[\"E\"]\n EQUATION = \"h^2 = 1\"\n COEFFS_PER_EQ = [[1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"f0^T t01 - q^T f1 - sct^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 6 + [-1.0]]\n CONSTRAINT_VALUES = [0.0]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f1^T E01^T [t01] f0 - scr^2 =0\"\n COEFFS_PER_EQ = [[1.0] * 18 + [-1.0]]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"E E^T = [t][t]^T\"\n COEFFS_PER_EQ = [\n 
[1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"E^T E = [q][q]^T\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"hE = [t]R\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"hE = R[q]\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"[t]R = R[q] = 0\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, 1.0, -1.0],\n [1.0, -1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, -1.0],\n [-1.0, 1.0, 1.0, -1.0],\n [-1.0, 1.0, -1.0, 1.0],\n [-1.0, 1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0, -1.0],\n [1.0, -1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"E q = 0\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 3\n EQUATION = \"E^T t = 0\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 3\n EQUATION = \"f0^T R01 q - t01^T R01 f1 - s1^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 18 + [-1.0]]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f0^T E01 [q] f1 + scr^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 19]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f0^T R f1 - t^T R f1 - scm1^2 = 0, f0^T R q - f1^T q - scm2^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 27 + [-1.0], [1.0] * 27 + [-1.0]]\n CONSTRAINT_VALUES = [0.0, 0.0]\n EQUATION = \"R R.T = I, R.T R = I\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]] * 11\n CONSTRAINT_VALUES = [1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0]\n R = params[\"R\"]\n EQUATION = \"hR = cofactor(R)\"\n COEFFS_PER_EQ = [[1.0, -1.0, 1.0]] * 9\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"ht - Rq = 0; hq - R^Tt = 0\"\n COEFFS_PER_EQ = [[1.0, -1.0, -1.0, -1.0]] * 6\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"h[t] - ER^T, h[q] - R^T E\"\n COEFFS_PER_EQ = [\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 18\n EQUATION = \"conv SO(3)\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, 1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, 1.0, -1.0, 1.0],\n [1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0]"
},
{
"identifier": "Parameter",
"path": "nonmin_pose/constraints/constraints.py",
"snippet": "class Parameter:\n \"\"\"Class for defining a parameter.\n\n Attributes:\n name: e.g. E, R, t, etc. This MUST match the name being used on the constraints.\n block: 1-based index of the block.\n block_ids: 1-based index of each parameter element in the block.\n \"\"\"\n\n __slots__ = (\"name\", \"block\", \"block_ids\")\n\n def __init__(self, name: str, block: int, block_ids: List[int]):\n assert block > 0, \"block must be positive\"\n assert all(idx > 0 for idx in block_ids), \"block_id must be positive\"\n\n self.name = name\n self.block = block\n self.block_ids = block_ids"
},
{
"identifier": "SyntheticData",
"path": "tests/testing_utils.py",
"snippet": "class SyntheticData:\n \"\"\"Data generation based on [1, Sec. 7.2.1] and [2].\n\n [1] An Efficient Solution to Non-Minimal Case Essential Matrix Estimation, J.Zhao.\n [2] https://github.com/jizhaox/npt-pose/blob/master/src/create2D2DExperiment.cpp\n \"\"\"\n\n def __init__(self, seed=0, min_depth=4.0, max_depth=8.0, focal=800.0) -> None:\n self.rng = np.random.default_rng(seed)\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.focal = focal # pixels\n\n def generate_data(\n self,\n transl_magnitude=2.0,\n euler_ang_magnitude=0.5,\n max_npoints=200,\n noise_level=0.0,\n scale_t=None,\n Rw1=None,\n tw1=None,\n ):\n \"\"\"Generate synthetic data.\"\"\"\n # absolute camera poses (w.r.t. world \"w\" reference).\n Rw0, tw0 = self.cam0_absolute_pose\n Rw1, tw1 = self.set_cam1_absolute_pose(\n transl_magnitude, euler_ang_magnitude, scale_t, Rw1, tw1\n )\n # relative pose such that p0 = R01 * p1 + t01.\n R01, t01, t01_unit = self.compute_relative_pose(Rw0, tw0, Rw1, tw1, scale_t)\n E01 = skew(t01_unit) @ R01\n f0_noisy, f1_noisy = self.generate_bearings(\n Rw0, tw0, Rw1, tw1, max_npoints, noise_level\n )\n return {\n \"f0\": f0_noisy,\n \"f1\": f1_noisy,\n \"R01\": R01,\n \"t01\": t01,\n \"t01_unit\": t01_unit,\n \"E01\": E01,\n }\n\n def generate_bearings(self, Rw0, tw0, Rw1, tw1, max_npoints, noise_level):\n # generate 3D points sampling from a unit cube.\n pw = self.generate_absolute_3d_points(max_npoints)\n\n # transform points to each camera reference.\n p0 = Rw0.T @ pw - Rw0.T @ tw0\n p1 = Rw1.T @ pw - Rw1.T @ tw1\n\n # corresponding bearing vectors.\n f0 = p0 / np.linalg.norm(p0, axis=0)\n f1 = p1 / np.linalg.norm(p1, axis=0)\n\n # add noise to the bearing vectors.\n f0_noisy = self.add_noise_to_bearings(f0, max_npoints, noise_level)\n f1_noisy = self.add_noise_to_bearings(f1, max_npoints, noise_level)\n return f0_noisy, f1_noisy\n\n def generate_absolute_3d_points(self, max_npoints):\n \"\"\"Sample 3D points sampling from a unit cube.\"\"\"\n unit_cube = self.rng.uniform(-0.5, 0.5, (3, max_npoints))\n directions = unit_cube / np.linalg.norm(unit_cube, axis=0)\n magnitudes = self.rng.uniform(self.min_depth, self.max_depth, (1, max_npoints))\n pw = magnitudes * directions\n return pw\n\n def add_noise_to_bearings(self, f, n, noise_level):\n \"\"\"Add noise to each bearing vector assuming spherical cameras.\n\n The noise, in pixels, is added in the tangent plane of each bearing. The\n distance of each tangent plane is determined by the focal length of the camera.\n \"\"\"\n cols_idx = np.arange(n)\n\n max_args, min_args = np.abs(f).argmax(0), np.abs(f).argmin(0)\n max_vals, min_vals = f[max_args, cols_idx], f[min_args, cols_idx]\n\n # first perpendicular vector.\n ortho_a = np.zeros((3, n))\n ortho_a[min_args, cols_idx] = 1.0\n ortho_a[max_args, cols_idx] = -min_vals / max_vals\n ortho_a = ortho_a / np.linalg.norm(ortho_a, axis=0)\n\n # second perpendicular vector.\n ortho_b = np.cross(f, ortho_a, axis=0)\n\n # add gaussian noise to each bearing.\n noise = self.rng.normal(0, noise_level, (2, n))\n f_noisy = self.focal * f + noise[0] * ortho_a + noise[1] * ortho_b\n f_noisy = f_noisy / np.linalg.norm(f_noisy, axis=0)\n return f_noisy\n\n def set_cam1_absolute_pose(\n self, transl_magnitude, euler_ang_magnitude, scale_t, Rw1, tw1\n ):\n \"\"\"camera 1 pose (w.r.t. 
world \"w\" reference).\"\"\"\n if Rw1 is None:\n euler_angles = self.rng.uniform(\n -euler_ang_magnitude, euler_ang_magnitude, (3,)\n )\n Rw1 = R.from_euler(\"zyx\", euler_angles).as_matrix()\n\n if tw1 is None:\n tw1 = transl_magnitude * self.rng.uniform(-1, 1, (3, 1))\n\n if scale_t is not None:\n # set translation magnitude, useful e.g. for accuracy vs translation length.\n tw1 = tw1 / np.linalg.norm(tw1) * scale_t\n return Rw1, tw1\n\n def compute_relative_pose(self, Rw0, tw0, Rw1, tw1, scale_t):\n \"\"\"Compute relative pose such that p0 = R01 * p1 + t01.\"\"\"\n R01 = Rw0.T @ Rw1\n t01 = Rw0.T @ (tw1 - tw0)\n if scale_t is None or scale_t > 0:\n t01_unit = t01 / np.linalg.norm(t01)\n else:\n # when there is pure rotation, any unit translation would satisfy the\n # epipolar constraint, e.g. we set it here to the x-axis unit vector.\n t01_unit = np.array([[1.0], [0], [0]])\n return R01, t01, t01_unit\n\n @property\n def cam0_absolute_pose(self):\n \"\"\"Camera 0 pose (w.r.t. world \"w\" reference).\"\"\"\n return np.eye(3), np.zeros((3, 1))"
},
{
"identifier": "adjoint_of_3x3_mat",
"path": "tests/testing_utils.py",
"snippet": "def adjoint_of_3x3_mat(E):\n \"\"\"Adjoint of a 3x3 matrix (valid for an essential matrix).\"\"\"\n assert E.shape == (3, 3)\n det = np.linalg.det\n det_minor00 = det(E[1:, 1:])\n det_minor01 = -det(E[1:, ::2])\n det_minor02 = det(E[1:, :2])\n det_minor10 = -det(E[::2, 1:])\n det_minor11 = det(E[::2, ::2])\n det_minor12 = -det(E[::2, :2])\n det_minor20 = det(E[:2, 1:])\n det_minor21 = -det(E[:2, ::2])\n det_minor22 = det(E[:2, :2])\n\n # adjugate/adjoint is the *transpose* of the matrix of cofactors.\n adj = np.array(\n [\n [det_minor00, det_minor10, det_minor20],\n [det_minor01, det_minor11, det_minor21],\n [det_minor02, det_minor12, det_minor22],\n ]\n )\n return adj"
},
{
"identifier": "sdpa2mat",
"path": "tests/testing_utils.py",
"snippet": "def sdpa2mat(constraint, block_sizes=[29], ndim=29):\n \"\"\"Converts SDPA format to matrix form.\"\"\"\n con_idx, blocks, values, rows, cols, coeffs = (\n constraint.constraint_idx,\n constraint.blocks,\n constraint.values,\n constraint.rows,\n constraint.cols,\n constraint.coeffs,\n )\n n_constraints = len(values)\n assert (np.unique(con_idx) == np.arange(1, n_constraints + 1)).all()\n assert (np.unique(blocks) == np.arange(1, len(block_sizes) + 1)).all()\n assert ndim == sum(block_sizes)\n\n # initialize and fill matrices of constraints.\n As = np.zeros((n_constraints, ndim, ndim))\n for block, constraint, row, col, coef in zip(blocks, con_idx, rows, cols, coeffs):\n # 0-based indexing.\n block, constraint, row, col = block - 1, constraint - 1, row - 1, col - 1\n rc_offset = sum(block_sizes[:block])\n row += rc_offset\n col += rc_offset\n As[constraint, row, col] = coef\n return As"
},
{
"identifier": "skew",
"path": "tests/testing_utils.py",
"snippet": "def skew(v):\n out = np.zeros((3, 3))\n out[0, 1] = -v[2, 0]\n out[0, 2] = v[1, 0]\n out[1, 0] = v[2, 0]\n out[1, 2] = -v[0, 0]\n out[2, 0] = -v[1, 0]\n out[2, 1] = v[0, 0]\n return out"
},
{
"identifier": "so3_orbitope",
"path": "tests/testing_utils.py",
"snippet": "def so3_orbitope(R):\n \"\"\"\n [1 + r00 + r11 + r22, r21 - r12, r02 - r20, r10 - r01 ]\n [r21 - r12, 1 + r00 - r11 - r22, r10 + r01, r02 + r20 ]\n [r02 - r20, r10 + r01, 1 - r00 + r11 - r22, r21 + r12 ]\n [r10 - r01, r02 + r20, r21 + r12, 1 - r00 - r11 + r22]\n \"\"\"\n r00, r01, r02, r10, r11, r12, r20, r21, r22 = R.ravel()\n return np.array(\n [\n [1 + r00 + r11 + r22, r21 - r12, r02 - r20, r10 - r01],\n [r21 - r12, 1 + r00 - r11 - r22, r10 + r01, r02 + r20],\n [r02 - r20, r10 + r01, 1 - r00 + r11 - r22, r21 + r12],\n [r10 - r01, r02 + r20, r21 + r12, 1 - r00 - r11 + r22],\n ]\n )"
}
] | import numpy as np
from nonmin_pose.constraints import constraints
from nonmin_pose.constraints.constraints import Parameter
from tests.testing_utils import (
SyntheticData,
adjoint_of_3x3_mat,
sdpa2mat,
skew,
so3_orbitope,
) | 7,275 |
CFG_DATASET = {
"seed": 0,
"min_depth": 4.0,
"max_depth": 8.0,
"focal": 800.0,
}
CFG_DATA = {
"transl_magnitude": 1.0,
"euler_ang_magnitude": 0.5,
"max_npoints": 100,
"noise_level": 0.0,
}
def create_parameters():
params = [
Parameter("E", 1, list(range(1, 10))),
Parameter("t", 1, list(range(10, 13))),
Parameter("q", 1, list(range(13, 16))),
Parameter("h", 1, [16]),
Parameter("R", 1, list(range(17, 26))),
Parameter("sct", 1, [26]),
Parameter("scr", 1, [27]),
Parameter("scr2", 1, [28]),
Parameter("scm1", 1, [29]),
Parameter("scm2", 1, [30]),
Parameter("Zc", 1, list(range(31, 47))),
]
return {p.name: p for p in params}
def sample_data():
dataset = SyntheticData(**CFG_DATASET)
data = dataset.generate_data(**CFG_DATA)
h, sct, scr, scr2, scm1, scm2 = 1.0, 0.0, 0.0, 0.0, 0.0, 0.0
q = data["R01"].T @ data["t01_unit"]
x = np.concatenate(
(
data["E01"].ravel(),
data["t01_unit"].ravel(),
q.ravel(),
[h],
data["R01"].ravel(),
[sct, scr, scr2, scm1, scm2],
so3_orbitope(data["R01"]).ravel(),
)
)
return x[:, None], data
def gather_errors(x, A, constraint, constraint_num, is_inequality):
values = constraint.values
if is_inequality:
cond_sdpa_sdpa = np.allclose(values, np.zeros_like(values))
cond_data_sdpa = np.allclose((x.T @ A @ x).squeeze(), constraint_num)
else:
cond_sdpa_sdpa = np.allclose((x.T @ A @ x).squeeze(), values)
cond_data_sdpa = np.allclose(constraint_num, values)
errors = []
if not cond_sdpa_sdpa:
if is_inequality:
errors.append("SDPA coefficients are not zero.")
else:
errors.append("SDPA coefficients lead to different SDPA values.")
if not cond_data_sdpa:
errors.append(
"SDPA values are different than those derived from data."
f"\n{(x.T @ A @ x).squeeze()}\n{constraint_num}"
)
success = len(errors) == 0
err_msg = "Errors:\n{}".format("\n".join(errors))
return success, err_msg
def obtain_errors(constraint_class, x, constraint_num, f0=None, f1=None):
params = create_parameters()
constraint = constraint_class(params, 0, 0, None)
is_inequality = constraint.__class__.__name__.startswith("Cheirality")
if is_inequality:
constraint.compute_coeffs(constraint.coeffs, f0, f1)
|
CFG_DATASET = {
"seed": 0,
"min_depth": 4.0,
"max_depth": 8.0,
"focal": 800.0,
}
CFG_DATA = {
"transl_magnitude": 1.0,
"euler_ang_magnitude": 0.5,
"max_npoints": 100,
"noise_level": 0.0,
}
def create_parameters():
params = [
Parameter("E", 1, list(range(1, 10))),
Parameter("t", 1, list(range(10, 13))),
Parameter("q", 1, list(range(13, 16))),
Parameter("h", 1, [16]),
Parameter("R", 1, list(range(17, 26))),
Parameter("sct", 1, [26]),
Parameter("scr", 1, [27]),
Parameter("scr2", 1, [28]),
Parameter("scm1", 1, [29]),
Parameter("scm2", 1, [30]),
Parameter("Zc", 1, list(range(31, 47))),
]
return {p.name: p for p in params}
def sample_data():
dataset = SyntheticData(**CFG_DATASET)
data = dataset.generate_data(**CFG_DATA)
h, sct, scr, scr2, scm1, scm2 = 1.0, 0.0, 0.0, 0.0, 0.0, 0.0
q = data["R01"].T @ data["t01_unit"]
x = np.concatenate(
(
data["E01"].ravel(),
data["t01_unit"].ravel(),
q.ravel(),
[h],
data["R01"].ravel(),
[sct, scr, scr2, scm1, scm2],
so3_orbitope(data["R01"]).ravel(),
)
)
return x[:, None], data
def gather_errors(x, A, constraint, constraint_num, is_inequality):
values = constraint.values
if is_inequality:
cond_sdpa_sdpa = np.allclose(values, np.zeros_like(values))
cond_data_sdpa = np.allclose((x.T @ A @ x).squeeze(), constraint_num)
else:
cond_sdpa_sdpa = np.allclose((x.T @ A @ x).squeeze(), values)
cond_data_sdpa = np.allclose(constraint_num, values)
errors = []
if not cond_sdpa_sdpa:
if is_inequality:
errors.append("SDPA coefficients are not zero.")
else:
errors.append("SDPA coefficients lead to different SDPA values.")
if not cond_data_sdpa:
errors.append(
"SDPA values are different than those derived from data."
f"\n{(x.T @ A @ x).squeeze()}\n{constraint_num}"
)
success = len(errors) == 0
err_msg = "Errors:\n{}".format("\n".join(errors))
return success, err_msg
def obtain_errors(constraint_class, x, constraint_num, f0=None, f1=None):
params = create_parameters()
constraint = constraint_class(params, 0, 0, None)
is_inequality = constraint.__class__.__name__.startswith("Cheirality")
if is_inequality:
constraint.compute_coeffs(constraint.coeffs, f0, f1)
| A = sdpa2mat(constraint, block_sizes=[len(x)], ndim=len(x)) | 4 | 2023-12-10 18:25:10+00:00 | 12k |
Jack24658735/FedLGT | fed_main.py | [
{
"identifier": "get_data",
"path": "load_data.py",
"snippet": "def get_data(args, curr_user=None):\n dataset = args.dataset\n data_root = args.dataroot\n batch_size = args.batch_size\n\n rescale = args.scale_size\n random_crop = args.crop_size\n attr_group_dict = args.attr_group_dict\n workers = args.workers\n n_groups = args.n_groups\n\n normTransform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n scale_size = rescale\n crop_size = random_crop\n if args.test_batch_size == -1:\n args.test_batch_size = batch_size\n\n trainTransform = transforms.Compose([\n transforms.Resize((scale_size, scale_size)),\n transforms.Resize((crop_size, crop_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normTransform])\n\n testTransform = transforms.Compose([\n transforms.Resize((scale_size, scale_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normTransform])\n\n test_dataset = None\n test_loader = None\n drop_last = False\n if dataset == 'coco':\n coco_root = os.path.join(data_root,'coco')\n ann_dir = os.path.join(coco_root,'annotations_pytorch')\n train_img_root = os.path.join(coco_root,'train2014')\n test_img_root = os.path.join(coco_root,'val2014')\n train_data_name = 'train.data'\n val_data_name = 'val_test.data'\n # Note: the val_test means the validation set and test set are combined\n # 20000 + 20504 = 40504 images\n \n train_dataset = Coco80Dataset(\n split='train',\n num_labels=args.num_labels,\n data_file=os.path.join(coco_root,train_data_name),\n img_root=train_img_root,\n annotation_dir=ann_dir,\n max_samples=args.max_samples,\n transform=trainTransform,\n known_labels=args.train_known_labels,\n testing=False)\n valid_dataset = None\n valid_loader = None\n test_dataset = Coco80Dataset(split='val',\n num_labels=args.num_labels,\n data_file=os.path.join(coco_root,val_data_name),\n img_root=test_img_root,\n annotation_dir=ann_dir,\n max_samples=args.max_samples,\n transform=testTransform,\n known_labels=args.test_known_labels,\n testing=True)\n elif dataset == 'coco1000':\n ann_dir = os.path.join(data_root,'coco','annotations_pytorch')\n data_dir = os.path.join(data_root,'coco')\n train_img_root = os.path.join(data_dir,'train2014')\n test_img_root = os.path.join(data_dir,'val2014')\n \n train_dataset = Coco1000Dataset(ann_dir, data_dir, split = 'train', transform = trainTransform,known_labels=args.train_known_labels,testing=False)\n valid_dataset = Coco1000Dataset(ann_dir, data_dir, split = 'val', transform = testTransform,known_labels=args.test_known_labels,testing=True)\n elif dataset == 'vg':\n vg_root = os.path.join(data_root,'VG')\n train_dir=os.path.join(vg_root,'VG_100K')\n train_list=os.path.join(vg_root,'train_list_500.txt')\n test_dir=os.path.join(vg_root,'VG_100K')\n test_list=os.path.join(vg_root,'test_list_500.txt')\n train_label=os.path.join(vg_root,'vg_category_500_labels_index.json')\n test_label=os.path.join(vg_root,'vg_category_500_labels_index.json')\n\n train_dataset = VGDataset(\n train_dir,\n train_list,\n trainTransform, \n train_label,\n known_labels=0,\n testing=False)\n \n valid_dataset = None\n valid_loader = None\n test_dataset = VGDataset(\n test_dir,\n test_list,\n testTransform,\n test_label,\n known_labels=args.test_known_labels,\n testing=True)\n \n elif dataset == 'news':\n drop_last=True\n ann_dir = '/bigtemp/jjl5sw/PartialMLC/data/bbc_data/'\n\n train_dataset = NewsDataset(ann_dir, split = 'train', transform = trainTransform,known_labels=0,testing=False)\n valid_dataset = NewsDataset(ann_dir, split = 'test', transform = 
testTransform,known_labels=args.test_known_labels,testing=True)\n \n elif dataset=='voc':\n voc_root = os.path.join(data_root,'voc/VOCdevkit/VOC2007/')\n img_dir = os.path.join(voc_root,'JPEGImages')\n anno_dir = os.path.join(voc_root,'Annotations')\n train_anno_path = os.path.join(voc_root,'ImageSets/Main/trainval.txt')\n test_anno_path = os.path.join(voc_root,'ImageSets/Main/test.txt')\n\n train_dataset = Voc07Dataset(\n img_dir=img_dir,\n anno_path=train_anno_path,\n image_transform=trainTransform,\n labels_path=anno_dir,\n known_labels=args.train_known_labels,\n testing=False,\n use_difficult=False)\n valid_dataset = None\n valid_loader = None\n # valid_dataset = Voc07Dataset(\n # img_dir=img_dir,\n # anno_path=test_anno_path,\n # image_transform=testTransform,\n # labels_path=anno_dir,\n # known_labels=args.test_known_labels,\n # testing=True)\n test_dataset = Voc07Dataset(\n img_dir=img_dir,\n anno_path=test_anno_path,\n image_transform=testTransform,\n labels_path=anno_dir,\n known_labels=args.test_known_labels,\n testing=True)\n\n elif dataset == 'cub':\n drop_last=True\n resol=299\n resized_resol = int(resol * 256/224)\n \n trainTransform = transforms.Compose([\n #transforms.Resize((resized_resol, resized_resol)),\n #transforms.RandomSizedCrop(resol),\n transforms.ColorJitter(brightness=32/255, saturation=(0.5, 1.5)),\n transforms.RandomResizedCrop(resol),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), #implicitly divides by 255\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])\n ])\n\n testTransform = transforms.Compose([\n #transforms.Resize((resized_resol, resized_resol)),\n transforms.CenterCrop(resol),\n transforms.ToTensor(), #implicitly divides by 255\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])\n ])\n \n cub_root = os.path.join(data_root,'CUB_200_2011')\n image_dir = os.path.join(cub_root,'images')\n train_list = os.path.join(cub_root,'class_attr_data_10','train_valid.pkl')\n valid_list = os.path.join(cub_root,'class_attr_data_10','train_valid.pkl')\n test_list = os.path.join(cub_root,'class_attr_data_10','test.pkl')\n\n train_dataset = CUBDataset(image_dir, train_list, trainTransform,known_labels=args.train_known_labels,attr_group_dict=attr_group_dict,testing=False,n_groups=n_groups)\n valid_dataset = CUBDataset(image_dir, valid_list, testTransform,known_labels=args.test_known_labels,attr_group_dict=attr_group_dict,testing=True,n_groups=n_groups)\n test_dataset = CUBDataset(image_dir, test_list, testTransform,known_labels=args.test_known_labels,attr_group_dict=attr_group_dict,testing=True,n_groups=n_groups)\n elif dataset == 'flair':\n # TODO:\n # central: \n # data file has key: {'metadata', 'train', 'val', 'test'}\n # metadata: {label_counter, fine_grained_label_counter}\n # Note: use np.array() to read in \n # train: keys() contain all image IDs\n data_dir = os.path.join(data_root, 'flair')\n img_root = os.path.join(data_dir, 'data/small_images')\n label_mapping = None\n fg_label_mapping = None\n \n if args.flair_fine:\n with open(data_dir + '/fine_grained_label_mapping.json') as fg:\n fg_label_mapping = json.load(fg)\n else:\n with open(data_dir + '/label_mapping.json') as f:\n label_mapping = json.load(f)\n\n trainTransform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((scale_size, scale_size)),\n # transforms.Resize((crop_size, crop_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normTransform])\n\n testTransform = transforms.Compose([transforms.ToPILImage(),\n 
transforms.Resize((scale_size, scale_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normTransform])\n \n train_dataset = FlairDataset(split='train', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=trainTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping,\n known_labels=args.train_known_labels)\n # modify this, maybe should re-run? (2023.1.13)\n valid_dataset = FlairDataset(split='val', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n test_dataset = FlairDataset(split='test', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n elif dataset == 'flair_fed':\n # TODO:\n # 1. sample user id (e.g., 200 users per round)\n # 2. for each user, build a model\n # ref: NIID-bench\n # build \"net_dataidx_map\" for each user (i.e. for each user, it has a dataidx list)\n # get_dataloader returns \"train/test_dl_local\"\n # Here, we build the dataset to allow the \"dataidx\"!\n data_dir = os.path.join(data_root, 'flair')\n img_root = os.path.join(data_dir, 'data/small_images')\n\n label_mapping = None\n fg_label_mapping = None\n \n if args.flair_fine:\n with open(data_dir + '/fine_grained_label_mapping.json') as fg:\n fg_label_mapping = json.load(fg)\n else:\n with open(data_dir + '/label_mapping.json') as f:\n label_mapping = json.load(f)\n trainTransform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((scale_size, scale_size)),\n # transforms.Resize((crop_size, crop_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normTransform])\n\n testTransform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((scale_size, scale_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normTransform])\n \n \n inp_data = h5py.File('/media/liujack/flair_hdf5/fl_data.hdf5', 'r')\n train_dataset = None\n\n\n if curr_user != None:\n train_dataset = FlairFedDataset(inp_data=inp_data,\n split='train', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n curr_user=curr_user,\n transform=trainTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping,\n known_labels=args.train_known_labels)\n else:\n train_dataset = inp_data\n # client agnoistic dataset\n valid_dataset = FlairDataset(split='val', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n # client agnoistic dataset\n test_dataset = FlairDataset(split='test', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n\n else:\n print('no dataset avail')\n exit(0)\n\n if train_dataset is not None:\n train_loader = DataLoader(train_dataset, batch_size=batch_size,shuffle=True, num_workers=workers,drop_last=drop_last)\n if valid_dataset is not None:\n valid_loader = DataLoader(valid_dataset, batch_size=args.test_batch_size,shuffle=False, num_workers=workers)\n if test_dataset is not None:\n test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size,shuffle=False, num_workers=workers)\n if dataset in ['flair_fed']:\n return train_loader, 
valid_loader, test_loader, train_dataset\n return train_loader,valid_loader,test_loader"
},
{
"identifier": "CTranModel",
"path": "models/CTran.py",
"snippet": "class CTranModel(nn.Module):\n def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):\n super(CTranModel, self).__init__()\n self.use_lmt = use_lmt\n \n self.no_x_features = no_x_features # (for no image features)\n\n # ResNet backbone\n self.backbone = Backbone()\n # self.backbone_c = BackboneCLIP()\n \n hidden = 512 # this should match the backbone output feature size\n\n self.downsample = False\n if self.downsample:\n self.conv_downsample = torch.nn.Conv2d(hidden,hidden,(1,1))\n \n # Label Embeddings\n self.label_input = torch.Tensor(np.arange(num_labels)).view(1,-1).long()\n self.label_lt = torch.nn.Embedding(num_labels, hidden, padding_idx=None)\n self.clip_label_lt = nn.Embedding.from_pretrained(label_weight, freeze=True, padding_idx=None)\n # State Embeddings\n self.known_label_lt = nn.Embedding.from_pretrained(state_weight, freeze=True, padding_idx=0)\n # self.known_label_lt = torch.nn.Embedding(3, hidden, padding_idx=0)\n\n # Position Embeddings (for image features)\n self.use_pos_enc = pos_emb\n if self.use_pos_enc:\n # self.position_encoding = PositionEmbeddingSine(int(hidden/2), normalize=True)\n self.position_encoding = positionalencoding2d(hidden, 18, 18).unsqueeze(0)\n\n # Transformer\n self.self_attn_layers = nn.ModuleList([SelfAttnLayer(hidden,heads,dropout) for _ in range(layers)])\n\n # Classifier\n # Output is of size num_labels because we want a separate classifier for each label\n \n self.output_linear = torch.nn.Linear(hidden,num_labels)\n\n # Other\n self.LayerNorm = nn.LayerNorm(hidden)\n self.dropout = nn.Dropout(dropout)\n\n # Init all except pretrained backbone\n self.label_lt.apply(weights_init)\n # below is just for c_tran original\n # self.known_label_lt.apply(weights_init)\n self.LayerNorm.apply(weights_init)\n self.self_attn_layers.apply(weights_init)\n self.output_linear.apply(weights_init)\n\n # only use backbone\n self.is_only_backbone = False\n self.use_ml_head = False\n self.decoder = MLDecoder(num_classes=num_labels, decoder_embedding=512, initial_num_features=512)\n\n\n def forward(self, images, mask, label_emb_type='ctran', clip_emb=None, clip_model=None):\n\n # decide the label embedding is learnable or not\n if label_emb_type == 'ctran':\n const_label_input = self.label_input.repeat(images.size(0),1).cuda()\n label_init_emb = self.label_lt(const_label_input)\n elif label_emb_type == 'onehot':\n const_label_input = F.one_hot(torch.arange(0, 17)) # (0~num_labels)\n label_init_emb = F.pad(const_label_input, pad=(0, 512 - const_label_input.shape[0], 0, 0)).unsqueeze(0)\n label_init_emb = torch.Tensor(label_init_emb).long().cuda()\n elif label_emb_type == 'clip':\n const_label_input = self.label_input.repeat(images.size(0),1).cuda()\n label_init_emb = self.clip_label_lt(const_label_input) \n \n features = self.backbone(images)\n if self.downsample:\n features = self.conv_downsample(features)\n if self.use_pos_enc:\n pos_encoding = self.position_encoding(features,torch.zeros(features.size(0),18,18, dtype=torch.bool).cuda())\n features = features + pos_encoding\n\n features = features.view(features.size(0),features.size(1),-1).permute(0,2,1) \n\n # Convert mask values to positive integers for nn.Embedding\n label_feat_vec = custom_replace(mask,0,1,2).long()\n\n # Get state embeddings\n state_embeddings = self.known_label_lt(label_feat_vec)\n init_label_embeddings = label_init_emb + state_embeddings\n \n if self.no_x_features:\n embeddings = 
init_label_embeddings \n else:\n embeddings = torch.cat((features, init_label_embeddings),1)\n # Feed image and label embeddings through Transformer\n embeddings = self.LayerNorm(embeddings)\n attns = []\n if not self.is_only_backbone:\n for layer in self.self_attn_layers:\n embeddings,attn = layer(embeddings,mask=None)\n attns += attn.detach().unsqueeze(0).data\n\n # Readout each label embedding using a linear layer\n # (1, 17, 512)\n label_embeddings = embeddings[:,-init_label_embeddings.size(1):,:]\n tmp_emb = embeddings[:,init_label_embeddings.size(1):,:]\n # Different decoder input?\n ## (1) resnet + label embedding out\n ## (2) only label embedding perform self-attn => not better than (1)\n ## (3) embedding out directly from encoder (visual + label emb) => best now\n if self.use_ml_head:\n for i in range(label_embeddings.shape[0]):\n if i == 0:\n output = self.decoder(tmp_emb[i].unsqueeze(0), label_embeddings[i].unsqueeze(0))\n else:\n output = torch.cat((output, self.decoder(tmp_emb[i].unsqueeze(0), label_embeddings[i].unsqueeze(0))))\n else:\n # (1, 17, 17)\n output = self.output_linear(label_embeddings) \n diag_mask = torch.eye(output.size(1)).unsqueeze(0).repeat(output.size(0),1,1).cuda()\n output = (output*diag_mask).sum(-1)\n \n return output,None,attns"
},
{
"identifier": "get_args",
"path": "config_args.py",
"snippet": "def get_args(parser,eval=False):\n parser.add_argument('--dataroot', type=str, default='./data/')\n parser.add_argument('--dataset', type=str, choices=['coco', 'voc','coco1000','nus','vg','news','cub', 'flair', 'flair_fed'], default='coco')\n ### change default by myself\n parser.add_argument('--workers', type=int, default=1)\n\n parser.add_argument('--results_dir', type=str, default='results/')\n parser.add_argument('--test_known', type=int, default=0)\n\n # Optimization\n parser.add_argument('--optim', type=str, choices=['adam', 'sgd', 'adamw'], default='adam')\n parser.add_argument('--lr', type=float, default=0.0002)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--test_batch_size', type=int, default=-1)\n parser.add_argument('--grad_ac_steps', type=int, default=1)\n parser.add_argument('--scheduler_step', type=int, default=1000)\n parser.add_argument('--scheduler_gamma', type=float, default=0.1)\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--int_loss', type=float, default=0.0)\n parser.add_argument('--aux_loss', type=float, default=0.0)\n parser.add_argument('--loss_type', type=str, choices=['bce', 'mixed','class_ce','soft_margin'], default='bce')\n parser.add_argument('--scheduler_type', type=str, choices=['plateau', 'step'], default='plateau')\n parser.add_argument('--loss_labels', type=str, choices=['all', 'unk'], default='all')\n parser.add_argument('--lr_decay', type=float, default=0)\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n parser.add_argument('--max_samples', type=int, default=-1)\n parser.add_argument('--max_batches', type=int, default=-1)\n parser.add_argument('--warmup_scheduler', action='store_true',help='')\n parser.add_argument('--rho', type=float, default=0, help='Parameter controlling the momentum SGD')\n\n\n # Model\n parser.add_argument('--layers', type=int, default=3)\n parser.add_argument('--heads', type=int, default=4)\n parser.add_argument('--dropout', type=float, default=0.1)\n parser.add_argument('--pos_emb', action='store_true',help='positional encoding') \n parser.add_argument('--use_lmt', dest='use_lmt', action='store_true',help='label mask training') \n parser.add_argument('--freeze_backbone', action='store_true')\n parser.add_argument('--no_x_features', action='store_true')\n\n # CUB\n parser.add_argument('--attr_group_dict', type=str, default='')\n \n parser.add_argument('--n_groups', type=int, default=10,help='groups for CUB test time intervention')\n\n # FLAIR\n parser.add_argument('--flair_fine', action='store_true', help='whether use the fine-grained labels defined in FLAIR.')\n \n # Image Sizes\n # change the default values for FLAIR\n parser.add_argument('--scale_size', type=int, default=256)\n parser.add_argument('--crop_size', type=int, default=256)\n\n # Testing Models\n parser.add_argument('--inference', action='store_true')\n parser.add_argument('--resume', action='store_true')\n parser.add_argument('--saved_model_name', type=str, default='')\n \n parser.add_argument('--overwrite', action='store_true')\n parser.add_argument('--name', type=str, default='')\n\n # FL setting\n # TODO:\n parser.add_argument('--is_same_initial', type=int, default=1, help='Whether initial all the models with the same parameters in fedavg')\n parser.add_argument('--n_parties', type=int, default=20, help='number of workers in a distributed cluster')\n parser.add_argument('--comm_round', type=int, default=50, help='number of maximum communication round')\n 
parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')\n parser.add_argument('--init_seed', type=int, default=514, help=\"Random seed\")\n parser.add_argument('--ckpt_path', type=str, default='', help='The path to the trained model (for inference usage)')\n\n\n # learnable embedding\n parser.add_argument('--learn_emb_type', type=str, choices=['ctran', 'onehot', 'clip'], default='ctran')\n parser.add_argument('--use_global_guide', action='store_true')\n parser.add_argument('--use_only_CLIP_visual', action='store_true')\n\n parser.add_argument('--alg', type=str, default='fedavg',\n help='fl algorithms: fedavg/fedprox/scaffold/fednova/moon')\n # visualize setting\n parser.add_argument('--visualize', action='store_true')\n\n # how to build coarse level CLIP embedding\n parser.add_argument('--coarse_prompt_type', type=str, choices=['avg', 'concat'], default='concat')\n # aggregation strategies\n parser.add_argument('--agg_type', type=str, choices=['fedavg', 'loss'], default='fedavg')\n # parser.add_argument('--sample', type=float, default=0.005, help='Sample ratio for each communication round')\n args = parser.parse_args()\n model_name = args.dataset\n if args.dataset == 'voc':\n args.num_labels = 20\n elif args.dataset == 'nus':\n args.num_labels = 1000\n elif args.dataset == 'coco1000':\n args.num_labels = 1000\n elif args.dataset == 'coco':\n args.num_labels = 80\n elif args.dataset == 'vg':\n args.num_labels = 500\n elif args.dataset == 'news':\n args.num_labels = 500\n elif args.dataset == 'cub':\n args.num_labels = 112\n # add FLAIR dataset \n elif args.dataset == 'flair' or args.dataset == 'flair_fed':\n if args.flair_fine:\n args.num_labels = 1628\n else:\n args.num_labels = 17\n else:\n print('dataset not included')\n exit()\n \n\n model_name += '.'+str(args.layers)+'layer'\n model_name += '.bsz_{}'.format(int(args.batch_size * args.grad_ac_steps))\n model_name += '.'+args.optim+str(args.lr)#.split('.')[1]\n if args.dataset == 'flair_fed':\n model_name += '.'+str(args.comm_round)+'round'\n print(f'Current embedding use:{args.learn_emb_type}')\n if args.learn_emb_type == 'ctran':\n model_name += '.ctran_emb'\n elif args.learn_emb_type == 'onehot':\n model_name += '.onehot_emb'\n elif args.learn_emb_type == 'clip':\n model_name += '.clip_emb'\n else:\n print('embedding setting is not included')\n exit()\n\n if args.use_global_guide:\n model_name += '.global_guide'\n \n if args.alg == 'fedavg':\n pass\n elif args.alg == 'fedprox':\n model_name += '.fedprox'\n else:\n print('FL setting is not implemented now')\n exit()\n\n if args.use_only_CLIP_visual:\n model_name += '.use_only_CLIP_visual'\n\n if args.agg_type == 'fedavg':\n model_name += 'agg_avg'\n elif args.agg_type == 'loss':\n model_name += 'agg_loss'\n else:\n print('FL setting is not included')\n exit()\n \n if args.coarse_prompt_type == 'avg':\n model_name += 'coarse_prompt_avg'\n elif args.coarse_prompt_type == 'concat':\n model_name += 'coarse_prompt_concat'\n else:\n print('FL setting is not included')\n exit()\n\n if args.use_lmt:\n model_name += '.lmt'\n args.loss_labels = 'unk'\n model_name += '.unk_loss'\n args.train_known_labels = 100\n else:\n args.train_known_labels = 0\n\n\n if args.pos_emb:\n model_name += '.pos_emb'\n\n if args.int_loss != 0.0:\n model_name += '.int_loss'+str(args.int_loss).split('.')[1]\n\n if args.aux_loss != 0.0:\n model_name += '.aux_loss'+str(args.aux_loss).replace('.','')\n\n if args.no_x_features:\n model_name += '.no_x_features'\n \n 
args.test_known_labels = int(args.test_known*0.01*args.num_labels)\n\n if args.dataset == 'cub':\n # reset the TOTAL number of labels to be concepts+classes\n model_name += '.step_{}'.format(args.scheduler_step)\n\n model_name += '.'+args.loss_type+'_loss'\n args.num_labels = 112+200\n\n args.attr_group_dict = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15], 3: [16, 17, 18, 19, 20, 21], 4: [22, 23, 24], 5: [25, 26, 27, 28, 29, 30], 6: [31], 7: [32, 33, 34, 35, 36], 8: [37, 38], 9: [39, 40, 41, 42, 43, 44], 10: [45, 46, 47, 48, 49], 11: [50], 12: [51, 52], 13: [53, 54, 55, 56, 57, 58], 14: [59, 60, 61, 62, 63], 15: [64, 65, 66, 67, 68, 69], 16: [70, 71, 72, 73, 74, 75], 17: [76, 77], 18: [78, 79, 80], 19: [81, 82], 20: [83, 84, 85], 21: [86, 87, 88], 22: [89], 23: [90, 91, 92, 93, 94, 95], 24: [96, 97, 98], 25: [99, 100, 101], 26: [102, 103, 104, 105, 106, 107], 27: [108, 109, 110, 111]}\n\n if args.flair_fine:\n model_name += '.fine_grained'\n \n if args.dataset == 'flair_fed':\n model_name += f'.client={args.n_parties}'\n\n if args.name != '':\n model_name += '.'+args.name\n \n if not os.path.exists(args.results_dir):\n os.makedirs(args.results_dir)\n \n model_name = os.path.join(args.results_dir,model_name)\n \n args.model_name = model_name\n\n\n if args.inference:\n args.epochs = 1\n\n \n if os.path.exists(args.model_name) and (not args.overwrite) and (not 'test' in args.name) and (not eval) and (not args.inference) and (not args.resume):\n print(args.model_name)\n overwrite_status = input('Already Exists. Overwrite?: ')\n if overwrite_status == 'rm':\n os.system('rm -rf '+args.model_name)\n elif not 'y' in overwrite_status:\n exit(0)\n elif not os.path.exists(args.model_name):\n os.makedirs(args.model_name)\n\n\n return args"
},
{
"identifier": "WarmupLinearSchedule",
"path": "optim_schedule.py",
"snippet": "class WarmupLinearSchedule(LambdaLR):\n \"\"\" Linear warmup and then linear decay.\n Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.\n Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps`\n steps.\n \"\"\"\n def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):\n self.warmup_steps = warmup_steps\n self.t_total = t_total\n super(WarmupLinearSchedule, self).__init__(\n optimizer, self.lr_lambda, last_epoch=last_epoch)\n\n def lr_lambda(self, step):\n if step < self.warmup_steps:\n return float(step) / float(max(1, self.warmup_steps))\n return max(0.0, float(self.t_total - step) / float(\n max(1.0, self.t_total - self.warmup_steps)))"
},
{
"identifier": "run_epoch",
"path": "run_epoch.py",
"snippet": "def run_epoch(args,model,data,optimizer,epoch,desc,train=False,warmup_scheduler=None, global_model=None, emb_feat=None, clip_model=None, tau=None):\n if train:\n model.train()\n optimizer.zero_grad()\n else:\n model.eval()\n\n # pre-allocate full prediction and target tensors\n all_predictions = torch.zeros(len(data.dataset),args.num_labels).cpu()\n all_targets = torch.zeros(len(data.dataset),args.num_labels).cpu()\n all_masks = torch.zeros(len(data.dataset),args.num_labels).cpu()\n all_image_ids = []\n batch_idx = 0\n loss_total = 0\n unk_loss_total = 0\n if train:\n if args.dataset == 'flair_fed' or args.dataset == 'coco' or args.dataset == 'voc':\n data_loader = data\n else:\n data_loader = tqdm(data,mininterval=0.5,desc=desc,leave=True,ncols=100)\n else:\n data_loader = tqdm(data,mininterval=0.5,desc=desc,leave=True,ncols=100)\n for batch in data_loader:\n \n\n labels = batch['labels'].float()\n images = batch['image'].float()\n mask = batch['mask'].float()\n \n # Original setting\n mask_in = mask.clone()\n if args.use_global_guide and train:\n with torch.no_grad():\n mask_g = mask_in.clone()\n for idx, m in enumerate(mask_g[0]):\n mask_g[0][idx] = -1.\n global_pred,_,_ = global_model(images.cuda(),mask_g.cuda(), args.learn_emb_type, emb_feat, clip_model)\n global_pred = global_pred.data.cpu()\n # print(global_pred.shape)\n # print(global_pred)\n global_logits = F.sigmoid(global_pred)\n \n # TODO: (for rebuttal) global pred. masking \n for idx, m in enumerate(mask_in[0]):\n if 0.48 <= global_logits[0][idx].item() <= 0.52:\n # mask this\n mask_in[0][idx] = -1.\n \n # mask -1, 0, 1 -> assigned become 1, 0, 0\n unk_mask = custom_replace(mask_in,1,0,0)\n all_image_ids += batch['imageIDs']\n\n ### TODO: CLIP\n # idea 1: label text to replace the label embedding in c_tran => there is a \"???\" in the scene\n # idea 2: [prompt] [label_text] => can be tuned\n if train:\n pred,int_pred,attns = model(images.cuda(),mask_in.cuda(), args.learn_emb_type, emb_feat, clip_model)\n else:\n for idx, m in enumerate(mask_in[0]):\n mask_in[0][idx] = -1.\n with torch.no_grad():\n pred,int_pred,attns = model(images.cuda(),mask_in.cuda(), args.learn_emb_type, emb_feat, clip_model)\n\n if args.dataset == 'cub':\n class_label = batch['class_label'].float()\n concept_certainty = batch['concept_certainty'].float()\n\n class_label_onehot = torch.zeros(class_label.size(0),200)\n class_label_onehot.scatter_(1,class_label.long(),1)\n\n labels = torch.cat((labels,class_label_onehot),1)\n loss = F.binary_cross_entropy_with_logits(pred.view(labels.size(0),-1),labels.cuda(),reduction='none')\n loss = (unk_mask.cuda()*loss).sum()/unk_mask.detach().sum().item()\n\n aux_loss = F.binary_cross_entropy_with_logits(int_pred.view(labels.size(0),-1),labels.cuda(),reduction='none')\n aux_loss = (unk_mask.cuda()*aux_loss).sum()/unk_mask.detach().sum().item()\n\n loss_out = 1.0*loss + float(args.aux_loss)*aux_loss\n loss = loss_out\n\n else:\n # TODO: (1) change to focal loss\n # TODO: (2) change to ASL\n loss = F.binary_cross_entropy_with_logits(pred.view(labels.size(0),-1),labels.cuda(),reduction='none')\n # loss = sigmoid_focal_loss(pred.view(labels.size(0),-1), labels.cuda(), alpha=0.005, gamma=5, reduction=None)\n # cri = AsymmetricLoss()\n if args.loss_labels == 'unk': \n # only use unknown labels for loss\n loss_out = (unk_mask.cuda()*loss).sum()\n else: \n # use all labels for loss\n loss_out = loss.sum() \n\n if train:\n # (FedProx): add proximal term\n if args.alg == 'fedprox':\n global_weight_collector = 
list(global_model.parameters())\n mu = 0.001\n #for fedprox\n fed_prox_reg = 0.0\n for param_index, param in enumerate(model.parameters()):\n fed_prox_reg += ((mu / 2) * torch.norm((param - global_weight_collector[param_index]))**2)\n loss_out += fed_prox_reg\n loss_out.backward()\n # Grad Accumulation\n if ((batch_idx + 1) % args.grad_ac_steps == 0):\n torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=10.0, norm_type=2)\n optimizer.step()\n optimizer.zero_grad()\n if warmup_scheduler is not None:\n warmup_scheduler.step()\n ## Updates ##\n loss_total += loss_out.item()\n unk_loss_total += loss_out.item()\n start_idx,end_idx=(batch_idx*data.batch_size),((batch_idx+1)*data.batch_size)\n \n if pred.size(0) != all_predictions[start_idx:end_idx].size(0):\n pred = pred.view(labels.size(0),-1)\n \n all_predictions[start_idx:end_idx] = pred.data.cpu()\n all_targets[start_idx:end_idx] = labels.data.cpu()\n\n all_masks[start_idx:end_idx] = mask_in.data.cpu()\n\n batch_idx += 1\n if args.dataset == 'flair':\n data_loader.set_description(f'Testing')\n data_loader.set_postfix(loss=f'{loss_total / (batch_idx + 1):.4f}')\n elif args.dataset == 'flair_fed' or args.dataset == 'coco' or args.dataset == 'voc':\n if not train:\n data_loader.set_description(f'Testing')\n data_loader.set_postfix(loss=f'{loss_total / (batch_idx + 1):.4f}')\n \n\n loss_total = loss_total/float(all_predictions.size(0))\n unk_loss_total = unk_loss_total/float(all_predictions.size(0))\n\n return all_predictions,all_targets,all_masks,all_image_ids,loss_total,unk_loss_total"
}
] | import torch
import argparse
import numpy as np
import utils.evaluate as evaluate
import utils.logger as logger
import logging
import datetime
import os
import random
import clip
import json
from load_data import get_data
from models import CTranModel
from config_args import get_args
from optim_schedule import WarmupLinearSchedule
from run_epoch import run_epoch
from tqdm import tqdm
from scipy.special import softmax | 10,157 |
def init_nets(args, is_global=False, state_weight=None, label_weight=None):
if is_global:
n_parties = 1
else:
n_parties = args.n_parties
nets = {net_i: None for net_i in range(n_parties)}
### FLAIR
for net_i in range(n_parties):
model = CTranModel(args.num_labels,args.use_lmt,args.pos_emb,args.layers,args.heads,args.dropout,args.no_x_features, state_weight=state_weight, label_weight=label_weight)
nets[net_i] = model
model_meta_data = []
layer_type = []
for (k, v) in nets[0].state_dict().items():
model_meta_data.append(v.shape)
layer_type.append(k)
return nets, model_meta_data, layer_type
def local_train_net(nets, args, u_id, test_dl = None, device="cpu", g_model=None, emb_feat=None, clip_model=None):
data_pts = 0
net_dataidx_map = {}
loss_based_agg_list = []
for net_id, net in nets.items():
net.to(device)
# TODO: for COCO-dataset, just use indexing of the original dataset to have new subset dataset
# TODO: VOC dataset is similar
if args.dataset == 'coco' or args.dataset == 'voc':
sub_dst = torch.utils.data.Subset(train_dl_global.dataset, partition_idx_map[net_id])
train_dl_local = torch.utils.data.DataLoader(sub_dst, batch_size=args.batch_size,shuffle=True, num_workers=args.workers,drop_last=False)
net_dataidx_map[net_id] = len(sub_dst)
data_pts += len(sub_dst)
else:
train_dl_local, test_dl, _, train_dataset = get_data(args, curr_user=u_id[net_id])
# for fedavg
net_dataidx_map[net_id] = len(train_dataset)
data_pts += len(train_dataset)
n_epoch = args.epochs
train_metrics, testacc = train_net(net_id, net, train_dl_local, test_dl, n_epoch, args, device=device, g_model=g_model, emb_feat=emb_feat, clip_model=clip_model)
# for loss-based agg.
loss_based_agg_list.append(train_metrics['loss'])
return data_pts, net_dataidx_map, loss_based_agg_list
def train_net(net_id, model, train_dataloader, valid_dataloader, epochs, args, device="cpu", g_model=None, emb_feat=None, clip_model=None):
fl_logger.info('Training network %s' % str(net_id))
loss_logger = logger.LossLogger(args.model_name)
if args.optim == 'adam':
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr)#, weight_decay=0.0004)
elif args.optim == 'adamw':
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr)
else:
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=1e-4)
if args.warmup_scheduler:
step_scheduler = None
scheduler_warmup = WarmupLinearSchedule(optimizer, 1, 300000)
else:
scheduler_warmup = None
if args.scheduler_type == 'plateau':
step_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.1,patience=5)
elif args.scheduler_type == 'step':
step_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.scheduler_step, gamma=args.scheduler_gamma)
else:
step_scheduler = None
test_loader = None
for epoch in range(epochs):
|
def init_nets(args, is_global=False, state_weight=None, label_weight=None):
if is_global:
n_parties = 1
else:
n_parties = args.n_parties
nets = {net_i: None for net_i in range(n_parties)}
### FLAIR
for net_i in range(n_parties):
model = CTranModel(args.num_labels,args.use_lmt,args.pos_emb,args.layers,args.heads,args.dropout,args.no_x_features, state_weight=state_weight, label_weight=label_weight)
nets[net_i] = model
model_meta_data = []
layer_type = []
for (k, v) in nets[0].state_dict().items():
model_meta_data.append(v.shape)
layer_type.append(k)
return nets, model_meta_data, layer_type
def local_train_net(nets, args, u_id, test_dl = None, device="cpu", g_model=None, emb_feat=None, clip_model=None):
data_pts = 0
net_dataidx_map = {}
loss_based_agg_list = []
for net_id, net in nets.items():
net.to(device)
# TODO: for COCO-dataset, just use indexing of the original dataset to have new subset dataset
# TODO: VOC dataset is similar
if args.dataset == 'coco' or args.dataset == 'voc':
sub_dst = torch.utils.data.Subset(train_dl_global.dataset, partition_idx_map[net_id])
train_dl_local = torch.utils.data.DataLoader(sub_dst, batch_size=args.batch_size,shuffle=True, num_workers=args.workers,drop_last=False)
net_dataidx_map[net_id] = len(sub_dst)
data_pts += len(sub_dst)
else:
train_dl_local, test_dl, _, train_dataset = get_data(args, curr_user=u_id[net_id])
# for fedavg
net_dataidx_map[net_id] = len(train_dataset)
data_pts += len(train_dataset)
n_epoch = args.epochs
train_metrics, testacc = train_net(net_id, net, train_dl_local, test_dl, n_epoch, args, device=device, g_model=g_model, emb_feat=emb_feat, clip_model=clip_model)
# for loss-based agg.
loss_based_agg_list.append(train_metrics['loss'])
return data_pts, net_dataidx_map, loss_based_agg_list
def train_net(net_id, model, train_dataloader, valid_dataloader, epochs, args, device="cpu", g_model=None, emb_feat=None, clip_model=None):
fl_logger.info('Training network %s' % str(net_id))
loss_logger = logger.LossLogger(args.model_name)
if args.optim == 'adam':
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr)#, weight_decay=0.0004)
elif args.optim == 'adamw':
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr)
else:
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=1e-4)
if args.warmup_scheduler:
step_scheduler = None
scheduler_warmup = WarmupLinearSchedule(optimizer, 1, 300000)
else:
scheduler_warmup = None
if args.scheduler_type == 'plateau':
step_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.1,patience=5)
elif args.scheduler_type == 'step':
step_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.scheduler_step, gamma=args.scheduler_gamma)
else:
step_scheduler = None
test_loader = None
for epoch in range(epochs): | all_preds, all_targs, all_masks, all_ids, train_loss, train_loss_unk = run_epoch(args,model,train_dataloader,optimizer,epoch,'Training',train=True,warmup_scheduler=scheduler_warmup,global_model=g_model,emb_feat=emb_feat, clip_model=clip_model) | 4 | 2023-12-09 09:16:59+00:00 | 12k |
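In the row above, `local_train_net` returns `data_pts` and `net_dataidx_map` "for fedavg", but the server-side aggregation that consumes them is outside the visible crop. The following is a minimal sketch, assuming plain FedAvg-style weighted averaging of client parameters; the helper name `fedavg_aggregate` and the toy `nn.Linear` clients are illustrative stand-ins, not code from the repository.

import torch.nn as nn


def fedavg_aggregate(global_model, local_models, n_samples_per_client):
    # Weighted average of the local state_dicts into the global model (plain FedAvg).
    # Real models with integer buffers (e.g. BatchNorm's num_batches_tracked) would need
    # those entries copied rather than averaged.
    total = float(sum(n_samples_per_client))
    global_state = global_model.state_dict()
    for key in global_state:
        global_state[key] = sum(
            local.state_dict()[key].float() * (n / total)
            for local, n in zip(local_models, n_samples_per_client)
        )
    global_model.load_state_dict(global_state)
    return global_model


if __name__ == "__main__":
    # Toy stand-ins for the CTranModel clients that init_nets() would create.
    clients = [nn.Linear(4, 2) for _ in range(3)]
    server = nn.Linear(4, 2)
    fedavg_aggregate(server, clients, n_samples_per_client=[100, 50, 25])
    print("aggregated weight shape:", server.weight.shape)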
AgriCodeHub/dairy-django-backend | health/views.py | [
{
"identifier": "WeightRecordFilterSet",
"path": "health/filters.py",
"snippet": "class WeightRecordFilterSet(filters.FilterSet):\n \"\"\"\n Filter set for querying WeightRecord instances based on specific criteria.\n\n Filters:\n - `cow`: A filter for the cow associated with the weight record (case-insensitive contains search).\n - `day_of_weighing`: An exact match filter for the day of the weighing date.\n - `month_of_weighing`: An exact match filter for the month of the weighing date.\n - `year_of_weighing`: An exact match filter for the year of the weighing date.\n\n Meta:\n - `model`: The WeightRecord model for which the filter set is defined.\n - `fields`: The fields available for filtering, including 'cow', 'day_of_weighing', 'month_of_weighing', and 'year_of_weighing'.\n\n Usage:\n Use this filter set to apply filters when querying the list of WeightRecord instances.\n For example, to retrieve all weight records for a specific cow.\n\n Example:\n ```\n /api/weight_records/?cow=123\n ```\n \"\"\"\n\n cow = filters.CharFilter(field_name=\"cow\", lookup_expr=\"icontains\")\n day_of_weighing = filters.NumberFilter(\n field_name=\"date_taken__day\", lookup_expr=\"exact\"\n )\n month_of_weighing = filters.NumberFilter(\n field_name=\"date_taken__month\", lookup_expr=\"exact\"\n )\n year_of_weighing = filters.NumberFilter(\n field_name=\"date_taken__year\", lookup_expr=\"exact\"\n )\n\n class Meta:\n model = WeightRecord\n fields = [\n \"cow\",\n \"day_of_weighing\",\n \"month_of_weighing\",\n \"year_of_weighing\",\n ]"
},
{
"identifier": "CullingRecordFilterSet",
"path": "health/filters.py",
"snippet": "class CullingRecordFilterSet(filters.FilterSet):\n \"\"\"\n Filter set for querying CullingRecord instances based on specific criteria.\n\n Filters:\n - `reason`: A filter for the reason of culling (case-insensitive contains search).\n - `month_of_culling`: An exact match filter for the month of the culling date.\n - `year_of_culling`: An exact match filter for the year of the culling date.\n\n Meta:\n - `model`: The CullingRecord model for which the filter set is defined.\n - `fields`: The fields available for filtering, including 'reason', 'month_of_culling', and 'year_of_culling'.\n\n Usage:\n Use this filter set to apply filters when querying the list of CullingRecord instances.\n For example, to retrieve all culling records with a specific reason.\n\n Example:\n ```\n /api/culling_records/?reason=cost\n ```\n \"\"\"\n\n reason = filters.CharFilter(field_name=\"reason\", lookup_expr=\"icontains\")\n month_of_culling = filters.NumberFilter(\n field_name=\"date_carried__month\", lookup_expr=\"exact\"\n )\n year_of_culling = filters.NumberFilter(\n field_name=\"date_carried__year\", lookup_expr=\"exact\"\n )\n\n class Meta:\n model = CullingRecord\n fields = [\"reason\", \"year_of_culling\", \"month_of_culling\"]"
},
{
"identifier": "QuarantineRecordFilterSet",
"path": "health/filters.py",
"snippet": "class QuarantineRecordFilterSet(filters.FilterSet):\n \"\"\"\n Filter set for querying QuarantineRecord instances based on specific criteria.\n\n Filters:\n - `reason`: A filter for the quarantine reason (exact match).\n - `start_date`: An exact match filter for the start date.\n - `end_date`: An exact match filter for the end date.\n\n Meta:\n - `model`: The QuarantineRecord model for which the filter set is defined.\n - `fields`: The fields available for filtering, including 'reason', 'start_date', and 'end_date'.\n\n Usage:\n Use this filter set to apply filters when querying the list of QuarantineRecord instances.\n For example, to retrieve all quarantine records for a specific reason.\n\n Example:\n ```\n /api/quarantine_records/?reason=some_reason\n ```\n \"\"\"\n\n reason = filters.CharFilter(field_name=\"reason\", lookup_expr=\"exact\")\n start_date = filters.DateFilter(field_name=\"start_date\", lookup_expr=\"exact\")\n end_date = filters.DateFilter(field_name=\"end_date\", lookup_expr=\"exact\")\n\n class Meta:\n model = QuarantineRecord\n fields = [\n \"reason\",\n \"start_date\",\n \"end_date\",\n ]"
},
{
"identifier": "DiseaseFilterSet",
"path": "health/filters.py",
"snippet": "class DiseaseFilterSet(filters.FilterSet):\n \"\"\"\n Filter set for querying Disease instances based on specific criteria.\n\n Filters:\n - `cows`: A filter for cows related to the disease (case-insensitive contains).\n - `pathogen`: A filter for the pathogen causing the disease (case-insensitive contains).\n - `category`: A filter for the disease category (case-insensitive contains).\n - `occurrence_date`: A filter for the occurrence date of the disease (case-insensitive contains).\n\n Meta:\n - `model`: The Disease model for which the filter set is defined.\n - `fields`: The fields available for filtering, including 'cows', 'pathogen', 'category', and 'occurrence_date'.\n\n Usage:\n Use this filter set to apply filters when querying the list of Disease instances.\n For example, to retrieve all diseases related to a specific cow.\n\n Example:\n ```\n /api/diseases/?cows=some_cow\n ```\n \"\"\"\n\n cows = filters.CharFilter(field_name=\"cows\", lookup_expr=\"icontains\")\n pathogen = filters.CharFilter(field_name=\"pathogen\", lookup_expr=\"icontains\")\n category = filters.CharFilter(field_name=\"category\", lookup_expr=\"icontains\")\n occurrence_date = filters.CharFilter(\n field_name=\"occurrence_date\", lookup_expr=\"icontains\"\n )\n\n class Meta:\n model = Disease\n fields = [\"cows\", \"pathogen\", \"category\", \"occurrence_date\"]"
},
{
"identifier": "RecoveryFilterSet",
"path": "health/filters.py",
"snippet": "class RecoveryFilterSet(filters.FilterSet):\n \"\"\"\n Filter set for querying Recovery instances based on specific criteria.\n\n Filters:\n - `cow`: A filter for cows recovering from a disease (case-insensitive contains).\n - `disease`: A filter for the disease from which cows are recovering (case-insensitive contains).\n\n Meta:\n - `model`: The Recovery model for which the filter set is defined.\n - `fields`: The fields available for filtering, including 'cow' and 'disease'.\n\n Usage:\n Use this filter set to apply filters when querying the list of Recovery instances.\n For example, to retrieve all recovery records for a specific cow.\n\n Example:\n ```\n /api/disease-recoveries/?cow=some_cow\n ```\n \"\"\"\n\n cow = filters.CharFilter(field_name=\"cow\", lookup_expr=\"icontains\")\n disease = filters.CharFilter(field_name=\"disease\", lookup_expr=\"icontains\")\n\n class Meta:\n model = Recovery\n fields = [\"cow\", \"disease\"]"
},
{
"identifier": "TreatmentFilterSet",
"path": "health/filters.py",
"snippet": "class TreatmentFilterSet(filters.FilterSet):\n \"\"\"\n Filter set for querying Treatment instances based on specific criteria.\n\n Filters:\n - `cow`: A filter for cows undergoing treatment (case-insensitive contains).\n - `disease`: A filter for the disease for which cows are receiving treatment (case-insensitive contains).\n\n Meta:\n - `model`: The Treatment model for which the filter set is defined.\n - `fields`: The fields available for filtering, including 'cow' and 'disease'.\n\n Usage:\n Use this filter set to apply filters when querying the list of Treatment instances.\n For example, to retrieve all treatment records for a specific cow.\n\n Example:\n ```\n /api/diseases-treatments/?cow=cow\n ```\n \"\"\"\n\n cow = filters.CharFilter(field_name=\"cow\", lookup_expr=\"icontains\")\n disease = filters.CharFilter(field_name=\"disease\", lookup_expr=\"icontains\")\n\n class Meta:\n model = Treatment\n fields = [\"cow\", \"disease\"]"
},
{
"identifier": "DiseaseCategory",
"path": "health/models.py",
"snippet": "class DiseaseCategory(models.Model):\n \"\"\"\n Represents a category of diseases affecting cows.\n\n Attributes:\n - `name` (str): The name of the disease category, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the name of the disease category.\n \"\"\"\n\n name = models.CharField(\n max_length=15, choices=DiseaseCategoryChoices.choices, unique=True\n )\n\n def clean(self):\n \"\"\"\n Validate the name of the disease category.\n \"\"\"\n DiseaseCategoryValidator.validate_name(self.name)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)"
},
{
"identifier": "Symptoms",
"path": "health/models.py",
"snippet": "class Symptoms(models.Model):\n \"\"\"\n Represents symptoms reported in cows.\n\n Attributes:\n - `name` (str): The name of the symptom.\n - `symptom_type` (str): The type of the symptom, chosen from predefined choices.\n - `description` (str): Description of the symptom (nullable).\n - `date_observed` (date): Date when the symptom was observed.\n - `severity` (str): Severity of the symptom, chosen from predefined choices.\n - `location` (str): Location of the symptom, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the attributes of the symptom.\n \"\"\"\n\n name = models.CharField(max_length=50)\n symptom_type = models.CharField(max_length=20, choices=SymptomTypeChoices.choices)\n description = models.TextField(null=True)\n severity = models.CharField(max_length=20, choices=SymptomSeverityChoices.choices)\n location = models.CharField(max_length=20, choices=SymptomLocationChoices.choices)\n date_observed = models.DateField()\n\n def clean(self):\n \"\"\"\n Validates the attributes of the symptom.\n \"\"\"\n SymptomValidator.validate_name(self.name)\n SymptomValidator.validate_fields(\n self.date_observed, self.symptom_type, self.severity, self.location\n )\n SymptomValidator.validate_type_and_location_compatibility(\n self.symptom_type, self.location\n )\n\n def __str__(self):\n return f\" {self.name} reported as #{self.severity} - on #{self.date_observed}\"\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)"
},
{
"identifier": "WeightRecord",
"path": "health/models.py",
"snippet": "class WeightRecord(models.Model):\n \"\"\"\n Represents a weight record for a cow.\n\n Attributes:\n - `cow` (Cow): The cow associated with the weight record.\n - `weight_in_kgs` (Decimal): The weight of the cow in kilograms.\n - `date_taken` (Date): The date when the weight record was taken.\n\n Methods:\n - `__str__`: Returns a string representation of the weight record.\n - `clean`: Performs validation checks before saving the weight record.\n - `save`: Overrides the save method to ensure validation before saving.\n\n Raises:\n - `ValidationError`: If weight record validation fails.\n \"\"\"\n\n cow = models.ForeignKey(Cow, on_delete=models.CASCADE)\n weight_in_kgs = models.DecimalField(max_digits=6, decimal_places=2)\n date_taken = models.DateField(auto_now_add=True)\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the weight record.\n \"\"\"\n return (\n f\"{self.cow} - Weight: {self.weight_in_kgs} kgs - Date: {self.date_taken}\"\n )\n\n def clean(self):\n \"\"\"\n Performs validation checks before saving the weight record.\n\n Raises:\n - `ValidationError`: If weight record validation fails.\n \"\"\"\n WeightRecordValidator.validate_weight(self.weight_in_kgs)\n WeightRecordValidator.validate_cow_availability_status(self.cow)\n WeightRecordValidator.validate_frequency_of_weight_records(\n self.date_taken, self.cow\n )\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to ensure validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)"
},
{
"identifier": "CullingRecord",
"path": "health/models.py",
"snippet": "class CullingRecord(models.Model):\n \"\"\"\n Represents a culling record for a cow.\n\n Attributes:\n - `cow` (Cow): The cow associated with the culling record.\n - `reason` (str): The reason for culling, chosen from predefined choices.\n - `notes` (str): Additional notes or comments about the culling.\n - `date_carried` (Date): The date when the culling record was created.\n\n Methods:\n - `__str__`: Returns a string representation of the culling record.\n \"\"\"\n\n cow = models.OneToOneField(\n Cow, on_delete=models.CASCADE, related_name=\"culling_record\"\n )\n reason = models.CharField(max_length=35, choices=CullingReasonChoices.choices)\n notes = models.TextField(null=True, max_length=100)\n date_carried = models.DateField(auto_now_add=True)\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the culling record.\n \"\"\"\n return f\"CullingRecord for {self.cow} - Reason: {self.reason} - Date: {self.date_carried}\""
},
{
"identifier": "QuarantineRecord",
"path": "health/models.py",
"snippet": "class QuarantineRecord(models.Model):\n \"\"\"\n Represents a quarantine record for a cow.\n\n Attributes:\n - `cow` (Cow): The cow associated with the quarantine record.\n - `reason` (str): The reason for quarantine, chosen from predefined choices.\n - `start_date` (Date): The start date of the quarantine period.\n - `end_date` (Date): The end date of the quarantine period (optional).\n - `notes` (str): Additional notes or comments about the quarantine.\n\n Methods:\n - `__str__`: Returns a string representation of the quarantine record.\n - `clean`: Validates the reason for quarantine and the date range.\n - `save`: Overrides the save method to perform additional validation before saving.\n \"\"\"\n\n class Meta:\n get_latest_by = \"-start_date\"\n\n cow = models.ForeignKey(\n Cow, on_delete=models.CASCADE, related_name=\"quarantine_records\"\n )\n reason = models.CharField(max_length=35, choices=QuarantineReasonChoices.choices)\n start_date = models.DateField(auto_now_add=True)\n end_date = models.DateField(null=True)\n notes = models.TextField(null=True, max_length=100)\n\n def __str__(self):\n if self.end_date:\n return f\"Quarantine Record of {self.cow.tag_number} from {self.start_date} to {self.end_date}\"\n return f\"Quarantine Record of {self.cow.tag_number} from {self.start_date}\"\n\n def clean(self):\n \"\"\"\n Validate the reason for quarantine and the date range for start and end dates.\n \"\"\"\n # Validate the reason for quarantine\n QuarantineValidator.validate_reason(self.reason, self.cow)\n\n # Validate the date range for start and end dates\n QuarantineValidator.validate_date(self.start_date, self.end_date)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)"
},
{
"identifier": "Pathogen",
"path": "health/models.py",
"snippet": "class Pathogen(models.Model):\n \"\"\"\n Represents a pathogen affecting a cow.\n\n Attributes:\n - `name` (str): The type of pathogen, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the name of the pathogen.\n \"\"\"\n\n name = models.CharField(max_length=10, choices=PathogenChoices.choices, unique=True)\n # diagnosis_date = models.DateField(auto_now_add=True)\n\n def clean(self):\n \"\"\"\n Validate the name of the pathogen.\n \"\"\"\n PathogenValidator.validate_name(self.name)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)"
},
{
"identifier": "Disease",
"path": "health/models.py",
"snippet": "class Disease(models.Model):\n \"\"\"\n Represents diseases in cows.\n\n Attributes:\n - `name` (str): The name of the disease.\n - `pathogen` (ForeignKey): The pathogen causing the disease.\n - `category` (ForeignKey): The category of the disease.\n - `date_reported` (date): Date when the disease was reported.\n - `occurrence_date` (date): Date when the disease occurred.\n - `notes` (str): Additional notes about the disease (nullable).\n - `cows` (ManyToManyField): Cows affected by the disease.\n - `symptoms` (ManyToManyField): Symptoms associated with the disease.\n\n Methods:\n - `clean`: Validates the attributes of the disease.\n \"\"\"\n\n name = models.CharField(max_length=50)\n pathogen = models.ForeignKey(Pathogen, on_delete=models.PROTECT)\n category = models.ForeignKey(\n DiseaseCategory, on_delete=models.PROTECT, related_name=\"diseases\"\n )\n date_reported = models.DateField(auto_now_add=True)\n occurrence_date = models.DateField()\n notes = models.TextField(null=True)\n cows = models.ManyToManyField(Cow, related_name=\"diseases\")\n symptoms = models.ManyToManyField(Symptoms, related_name=\"diseases\")\n\n def __str__(self):\n return f\"{self.name} ({self.pathogen.name}) occurred on {self.occurrence_date}\"\n\n def clean(self):\n DiseaseValidator.validate_date(self.occurrence_date)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)"
},
{
"identifier": "Recovery",
"path": "health/models.py",
"snippet": "class Recovery(models.Model):\n \"\"\"\n Represents the recovery status of a cow from a specific disease.\n\n Attributes:\n - `cow` (ForeignKey): The cow recovering from the disease.\n - `disease` (ForeignKey): The disease from which the cow is recovering.\n - `diagnosis_date` (date): Date when the disease was diagnosed.\n - `recovery_date` (date): Date when the cow recovered (nullable).\n \"\"\"\n\n cow = models.ForeignKey(Cow, on_delete=models.CASCADE, related_name=\"recoveries\")\n disease = models.ForeignKey(\n Disease, on_delete=models.CASCADE, related_name=\"recoveries\"\n )\n diagnosis_date = models.DateField()\n recovery_date = models.DateField(null=True)\n\n def __str__(self):\n if self.recovery_date:\n return f\"{self.cow.tag_number} recovered from {self.disease.name} on {self.recovery_date}\"\n return f\"{self.cow.tag_number} not yet recovered from {self.disease.name}\""
},
{
"identifier": "Treatment",
"path": "health/models.py",
"snippet": "class Treatment(models.Model):\n \"\"\"\n Represents the treatment details for a cow diagnosed with a specific disease.\n\n Attributes:\n - `disease` (ForeignKey): The disease for which the cow is receiving treatment.\n - `cow` (ForeignKey): The cow undergoing treatment.\n - `date_of_treatment` (date): Date when the treatment was initiated.\n - `treatment_method` (str): Description of the treatment method (max length: 300).\n - `notes` (str, nullable): Additional notes about the treatment.\n - `treatment_status` (str): Status of the treatment (choices: 'Scheduled', 'In Progress', 'Completed').\n - `completion_date` (date, nullable): Date when the treatment was completed.\n\n Methods:\n - `clean`: Validates the attributes of the treatment.\n \"\"\"\n\n disease = models.ForeignKey(Disease, on_delete=models.PROTECT)\n cow = models.ForeignKey(Cow, on_delete=models.CASCADE)\n date_of_treatment = models.DateField(auto_now_add=True)\n treatment_method = models.TextField(max_length=300)\n notes = models.TextField(null=True)\n treatment_status = models.CharField(\n max_length=15,\n choices=TreatmentStatusChoices.choices,\n default=TreatmentStatusChoices.SCHEDULED,\n )\n completion_date = models.DateField(null=True)\n\n def clean(self):\n \"\"\"\n Validates the attributes of the treatment.\n\n Raises:\n - `ValidationError` (code: `invalid_treatment_status`):\n If the treatment status is invalid based on the cow's current recovery status.\n \"\"\"\n TreatmentValidator.validate_treatment_status(\n self.cow, self.treatment_status, self.notes, self.completion_date\n )\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)\n\n def __str__(self):\n if self.completion_date:\n return f\"{self.cow.tag_number} completed treatment for {self.disease.name} on {self.completion_date}\"\n return f\"{self.cow.tag_number} undergoing treatment for {self.disease.name}\""
},
{
"identifier": "DiseaseCategorySerializer",
"path": "health/serializers.py",
"snippet": "class DiseaseCategorySerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the DiseaseCategory model.\n\n Fields:\n - `name`: A choice field representing the type of disease.\n\n Meta:\n - `model`: The DiseaseCategory model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Usage:\n Use this serializer to convert DiseaseCategory model instances to JSON representations\n and vice versa.\n\n Example:\n ```\n class DiseaseCategory(models.Model):\n name = models.CharField(max_length=15, choices=DiseaseCategoryChoices.choices)\n\n class DiseaseCategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = DiseaseCategory\n fields = (\"name\",)\n ```\n \"\"\"\n\n class Meta:\n model = DiseaseCategory\n fields = (\"name\",)"
},
{
"identifier": "SymptomsSerializer",
"path": "health/serializers.py",
"snippet": "class SymptomsSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the Symptoms model.\n\n Fields:\n - `name`: The name of the symptom.\n - `symptom_type`: The type of the symptom.\n - `description`: Description of the symptom (nullable).\n - `date_observed`: Date when the symptom was observed.\n - `severity`: Severity of the symptom.\n - `location`: Location of the symptom.\n\n Meta:\n - `model`: The Symptoms model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n \"\"\"\n\n class Meta:\n model = Symptoms\n fields = (\n \"name\",\n \"symptom_type\",\n \"description\",\n \"date_observed\",\n \"severity\",\n \"location\",\n )"
},
{
"identifier": "WeightRecordSerializer",
"path": "health/serializers.py",
"snippet": "class WeightRecordSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the WeightRecord model.\n\n Fields:\n - `cow`: A primary key related field representing the cow associated with the weight record.\n - `weight_in_kgs`: A decimal field representing the weight of the cow in kilograms.\n - `date_taken`: A date field representing the date when the weight record was taken.\n\n Meta:\n - `model`: The WeightRecord model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Usage:\n Use this serializer to convert WeightRecord model instances to JSON representations\n and vice versa.\n\n Example:\n ```\n class WeightRecord(models.Model):\n cow = models.ForeignKey(Cow, on_delete=models.CASCADE)\n weight_in_kgs = models.DecimalField(max_digits=6, decimal_places=2)\n date_taken = models.DateField(auto_now_add=True)\n\n class WeightRecordSerializer(serializers.ModelSerializer):\n class Meta:\n model = WeightRecord\n fields = (\"cow\", \"weight_in_kgs\", \"date_taken\")\n ```\n \"\"\"\n\n cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all())\n\n class Meta:\n model = WeightRecord\n fields = (\"cow\", \"weight_in_kgs\", \"date_taken\")"
},
{
"identifier": "CullingRecordSerializer",
"path": "health/serializers.py",
"snippet": "class CullingRecordSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the CullingRecord model.\n\n Fields:\n - `cow`: A primary key related field representing the cow associated with the culling record.\n - `reason`: A field representing the reason for culling, chosen from predefined choices.\n - `notes`: A text field representing additional notes or comments about the culling.\n - `date_carried`: A date field representing the date when the culling record was created.\n\n Meta:\n - `model`: The CullingRecord model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Usage:\n Use this serializer to convert CullingRecord model instances to JSON representations\n and vice versa.\n\n Example:\n ```python\n class CullingRecord(models.Model):\n cow = models.OneToOneField(Cow, on_delete=models.CASCADE, related_name=\"culling_record\")\n reason = models.CharField(max_length=35, choices=CullingReasonChoices.choices)\n notes = models.TextField(null=True, max_length=100)\n date_carried = models.DateField(auto_now_add=True)\n\n class CullingRecordSerializer(serializers.ModelSerializer):\n class Meta:\n model = CullingRecord\n fields = (\"cow\", \"reason\", \"notes\", \"date_carried\")\n ```\n\n \"\"\"\n\n cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all())\n\n class Meta:\n model = CullingRecord\n fields = (\"cow\", \"reason\", \"notes\", \"date_carried\")"
},
{
"identifier": "QuarantineRecordSerializer",
"path": "health/serializers.py",
"snippet": "class QuarantineRecordSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the QuarantineRecord model.\n\n Fields:\n - `cow`: A primary key related field representing the cow associated with the quarantine record.\n - `reason`: A choice field representing the reason for quarantine.\n - `start_date`: A date field representing the start date of the quarantine record.\n - `end_date`: A date field representing the end date of the quarantine record.\n - `notes`: A text field representing optional notes for the quarantine record.\n\n Meta:\n - `model`: The QuarantineRecord model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Usage:\n Use this serializer to convert QuarantineRecord model instances to JSON representations\n and vice versa.\n\n Example:\n ```\n class QuarantineRecord(models.Model):\n cow = models.ForeignKey(Cow, on_delete=models.CASCADE, related_name=\"quarantine_records\")\n reason = models.CharField(max_length=35, choices=QuarantineReasonChoices.choices)\n start_date = models.DateField(auto_now_add=True)\n end_date = models.DateField(null=True)\n notes = models.TextField(null=True, max_length=100)\n\n class QuarantineRecordSerializer(serializers.ModelSerializer):\n class Meta:\n model = QuarantineRecord\n fields = (\"cow\", \"reason\", \"start_date\", \"end_date\", \"notes\")\n ```\n \"\"\"\n\n cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all())\n\n class Meta:\n model = QuarantineRecord\n fields = (\"cow\", \"reason\", \"start_date\", \"end_date\", \"notes\")"
},
{
"identifier": "PathogenSerializer",
"path": "health/serializers.py",
"snippet": "class PathogenSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the Pathogen model.\n\n Fields:\n - `name`: A choice field representing the type of pathogen.\n\n Meta:\n - `model`: The Pathogen model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Usage:\n Use this serializer to convert Pathogen model instances to JSON representations\n and vice versa.\n\n Example:\n ```\n class Pathogen(models.Model):\n name = models.CharField(max_length=10, choices=PathogenChoices.choices)\n # diagnosis_date = models.DateField(auto_now_add=True)\n\n class PathogenSerializer(serializers.ModelSerializer):\n class Meta:\n model = Pathogen\n fields = (\"name\",)\n ```\n \"\"\"\n\n class Meta:\n model = Pathogen\n fields = (\"name\",)"
},
{
"identifier": "DiseaseSerializer",
"path": "health/serializers.py",
"snippet": "class DiseaseSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the Disease model.\n\n Fields:\n - `name`: The name of the disease.\n - `pathogen`: The pathogen causing the disease.\n - `category`: The category of the disease.\n - `date_reported`: Date when the disease was reported.\n - `occurrence_date`: Date when the disease occurred.\n - `notes`: Additional notes about the disease (nullable).\n - `cows`: Cows affected by the disease.\n - `symptoms`: Symptoms associated with the disease.\n\n Meta:\n - `model`: The Disease model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Note: The `cows` and `symptoms` fields are represented by their primary keys in the serialized data.\n\n \"\"\"\n\n class Meta:\n model = Disease\n fields = (\n \"name\",\n \"pathogen\",\n \"category\",\n \"date_reported\",\n \"occurrence_date\",\n \"notes\",\n \"cows\",\n \"symptoms\",\n )"
},
{
"identifier": "RecoverySerializer",
"path": "health/serializers.py",
"snippet": "class RecoverySerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the Recovery model.\n\n Fields:\n - `cow`: The cow recovering from the disease.\n - `disease`: The disease from which the cow is recovering.\n - `diagnosis_date`: Date when the disease was diagnosed.\n - `recovery_date`: Date when the cow recovered (nullable).\n\n Meta:\n - `model`: The Recovery model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Note: The `cow` and `disease` fields are represented by their primary keys in the serialized data.\n\n \"\"\"\n\n class Meta:\n model = Recovery\n fields = (\"cow\", \"disease\", \"diagnosis_date\", \"recovery_date\")"
},
{
"identifier": "TreatmentSerializer",
"path": "health/serializers.py",
"snippet": "class TreatmentSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the Treatment model.\n\n Fields:\n - `disease`: The disease for which the cow is receiving treatment.\n - `cow`: The cow undergoing treatment.\n - `date_of_treatment`: Date when the treatment was initiated.\n - `treatment_method`: Description of the treatment method (max length: 300).\n - `notes`: Additional notes about the treatment (nullable).\n - `treatment_status`: Status of the treatment.\n - `completion_date`: Date when the treatment was completed (nullable).\n\n Meta:\n - `model`: The Treatment model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Note: The `disease` and `cow` fields are represented by their primary keys in the serialized data.\n \"\"\"\n\n class Meta:\n model = Treatment\n fields = (\n \"disease\",\n \"cow\",\n \"date_of_treatment\",\n \"treatment_method\",\n \"notes\",\n \"treatment_status\",\n \"completion_date\"\n )"
},
{
"identifier": "IsFarmManager",
"path": "users/permissions.py",
"snippet": "class IsFarmManager(BasePermission):\n \"\"\"\n Custom permission class that allows only farm owners and managers to perform an action.\n\n Raises:\n - `PermissionDenied`: If the user is not a farm owner or a farm manager.\n\n Usage:\n Add the permission class to the view or viewset that requires farm owners and managers access:\n permission_classes = [IsFarmManager]\n \"\"\"\n\n message = {\n \"error\": \"Only farm owners and managers have permission to perform this action.\"\n }\n\n def has_permission(self, request, view):\n # Check if the current user is a farm manager\n if request.user.is_authenticated and (\n request.user.is_farm_manager or request.user.is_farm_owner\n ):\n return True\n if not request.user.is_authenticated:\n raise AuthenticationFailed(\n {\"error\": \"Authentication credentials were not provided! Please login to proceed.\"}\n )\n raise PermissionDenied(self.message)"
},
{
"identifier": "IsFarmOwner",
"path": "users/permissions.py",
"snippet": "class IsFarmOwner(BasePermission):\n \"\"\"\n Custom permission class that allows only farm owners to perform an action.\n\n Raises:\n - `PermissionDenied`: If the user is not a farm owner.\n\n Usage:\n Add the permission class to the view or viewset that requires farm owners access:\n permission_classes = [IsFarmOwner]\n \"\"\"\n\n message = {\"error\": \"Only farm owners have permission to perform this action.\"}\n\n def has_permission(self, request, view):\n # Check if the current user is a farm owner\n if request.user.is_authenticated and request.user.is_farm_owner:\n return True\n if not request.user.is_authenticated:\n raise AuthenticationFailed(\n {\"error\": \"Authentication credentials were not provided! Please login to proceed.\"}\n )\n raise PermissionDenied(self.message)"
},
{
"identifier": "IsAssistantFarmManager",
"path": "users/permissions.py",
"snippet": "class IsAssistantFarmManager(BasePermission):\n \"\"\"\n Custom permission class that allows only farm owners, managers, and assistants to perform an action.\n\n Raises:\n - `PermissionDenied`: If the user is not a farm owner, a farm manager, or an assistant farm manager.\n\n Usage:\n Add the permission class to the view or viewset that requires farm owners, managers, and assistants access:\n permission_classes = [IsAssistantFarmManager]\n \"\"\"\n\n message = {\n \"error\": \"Only farm owners, managers, and assistants have permission to perform this action.\"\n }\n\n def has_permission(self, request, view):\n # Check if the current user is an assistant farm manager\n if request.user.is_authenticated and (\n request.user.is_assistant_farm_manager\n or request.user.is_farm_manager\n or request.user.is_farm_owner\n ):\n return True\n if not request.user.is_authenticated:\n raise AuthenticationFailed(\n {\"error\": \"Authentication credentials were not provided! Please login to proceed.\"}\n )\n raise PermissionDenied(self.message)"
}
] | from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, status
from rest_framework.exceptions import MethodNotAllowed
from rest_framework.filters import OrderingFilter
from rest_framework.response import Response
from health.filters import (
WeightRecordFilterSet,
CullingRecordFilterSet,
QuarantineRecordFilterSet,
DiseaseFilterSet,
RecoveryFilterSet,
TreatmentFilterSet,
)
from health.models import (
DiseaseCategory,
Symptoms,
WeightRecord,
CullingRecord,
QuarantineRecord,
Pathogen,
Disease,
Recovery,
Treatment,
)
from health.serializers import (
DiseaseCategorySerializer,
SymptomsSerializer,
WeightRecordSerializer,
CullingRecordSerializer,
QuarantineRecordSerializer,
PathogenSerializer,
DiseaseSerializer,
RecoverySerializer,
TreatmentSerializer,
)
from users.permissions import IsFarmManager, IsFarmOwner, IsAssistantFarmManager | 9,181 |
class WeightRecordViewSet(viewsets.ModelViewSet):
"""
ViewSet to handle operations related to weight records.
Provides CRUD functionality for weight records.
Actions:
- list: Get a list of weight records based on applied filters.
Returns a 404 response if no weight records match the provided filters,
and a 200 response with an empty list if there are no weight records in the database.
- retrieve: Retrieve details of a specific weight record.
- create: Create a new weight record.
- update: Update an existing weight record.
- partial_update: Partially update an existing weight record.
- destroy: Delete an existing weight record.
Serializer class used for request/response data: WeightRecordSerializer.
Permissions:
- For 'list', 'retrieve': Accessible to assistant farm managers, farm managers, and farm owners only.
- For 'create': Accessible to farm workers, assistant farm managers, farm managers, and farm owners.
- For 'update', 'partial_update', 'destroy': Accessible to farm managers and farm owners only.
"""
queryset = WeightRecord.objects.all()
serializer_class = WeightRecordSerializer
filter_backends = [DjangoFilterBackend, OrderingFilter]
filterset_class = WeightRecordFilterSet
ordering_fields = ["-date_taken"]
permission_classes = [IsAssistantFarmManager | IsFarmManager | IsFarmOwner]
def list(self, request, *args, **kwargs):
"""
List weight records based on applied filters.
Returns a 404 response if no weight records match the provided filters,
and a 200 response with an empty list if there are no weight records in the database.
"""
queryset = self.filter_queryset(self.get_queryset())
if not queryset.exists():
if request.query_params:
return Response(
{
"detail": "No Weight records found matching the provided filters."
},
status=status.HTTP_404_NOT_FOUND,
)
else:
return Response(
{"detail": "No Weight records found."}, status=status.HTTP_200_OK
)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class CullingRecordViewSet(viewsets.ModelViewSet):
"""
ViewSet to handle operations related to culling records.
Provides CRUD functionality for culling records.
Actions:
- list: Get a list of culling records based on applied filters.
Returns a 404 response if no culling records match the provided filters,
and a 200 response with an empty list if there are no culling records in the database.
- retrieve: Retrieve details of a specific culling record.
- create: Create a new culling record.
- partial_update: Not allowed.
- update: Not allowed.
- destroy: Delete an existing culling record.
Serializer class used for request/response data: CullingRecordSerializer.
Permissions:
- For 'list', 'retrieve': Accessible to farm managers and farm owners only.
- For 'create': Accessible to farm managers and farm owners only.
- For 'partial_update', 'update', 'destroy': Accessible to farm managers and farm owners only.
"""
queryset = CullingRecord.objects.all()
serializer_class = CullingRecordSerializer
filter_backends = [DjangoFilterBackend, OrderingFilter]
|
class WeightRecordViewSet(viewsets.ModelViewSet):
"""
ViewSet to handle operations related to weight records.
Provides CRUD functionality for weight records.
Actions:
- list: Get a list of weight records based on applied filters.
Returns a 404 response if no weight records match the provided filters,
and a 200 response with an empty list if there are no weight records in the database.
- retrieve: Retrieve details of a specific weight record.
- create: Create a new weight record.
- update: Update an existing weight record.
- partial_update: Partially update an existing weight record.
- destroy: Delete an existing weight record.
Serializer class used for request/response data: WeightRecordSerializer.
Permissions:
- For 'list', 'retrieve': Accessible to assistant farm managers, farm managers, and farm owners only.
- For 'create': Accessible to farm workers, assistant farm managers, farm managers, and farm owners.
- For 'update', 'partial_update', 'destroy': Accessible to farm managers and farm owners only.
"""
queryset = WeightRecord.objects.all()
serializer_class = WeightRecordSerializer
filter_backends = [DjangoFilterBackend, OrderingFilter]
filterset_class = WeightRecordFilterSet
ordering_fields = ["-date_taken"]
permission_classes = [IsAssistantFarmManager | IsFarmManager | IsFarmOwner]
def list(self, request, *args, **kwargs):
"""
List weight records based on applied filters.
Returns a 404 response if no weight records match the provided filters,
and a 200 response with an empty list if there are no weight records in the database.
"""
queryset = self.filter_queryset(self.get_queryset())
if not queryset.exists():
if request.query_params:
return Response(
{
"detail": "No Weight records found matching the provided filters."
},
status=status.HTTP_404_NOT_FOUND,
)
else:
return Response(
{"detail": "No Weight records found."}, status=status.HTTP_200_OK
)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class CullingRecordViewSet(viewsets.ModelViewSet):
"""
ViewSet to handle operations related to culling records.
Provides CRUD functionality for culling records.
Actions:
- list: Get a list of culling records based on applied filters.
Returns a 404 response if no culling records match the provided filters,
and a 200 response with an empty list if there are no culling records in the database.
- retrieve: Retrieve details of a specific culling record.
- create: Create a new culling record.
- partial_update: Not allowed.
- update: Not allowed.
- destroy: Delete an existing culling record.
Serializer class used for request/response data: CullingRecordSerializer.
Permissions:
- For 'list', 'retrieve': Accessible to farm managers and farm owners only.
- For 'create': Accessible to farm managers and farm owners only.
- For 'partial_update', 'update', 'destroy': Accessible to farm managers and farm owners only.
"""
queryset = CullingRecord.objects.all()
serializer_class = CullingRecordSerializer
filter_backends = [DjangoFilterBackend, OrderingFilter] | filterset_class = CullingRecordFilterSet | 1 | 2023-12-09 06:56:42+00:00 | 12k |
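The row above documents the same empty-queryset policy for both `WeightRecordViewSet` and `CullingRecordViewSet`: a 404 when user-supplied filters match nothing, and a 200 with a plain detail message when the table itself is empty. Below is a minimal, framework-free sketch of that policy; the helper name and signature are illustrative assumptions, not part of the repository.

```python
# Sketch of the empty-list status policy described in the viewset docstrings
# above: 404 when query parameters were supplied but matched nothing,
# 200 when the table is simply empty. Names here are assumptions.

def empty_list_response(record_name: str, has_query_params: bool) -> tuple:
    """Return (payload, http_status) for an empty, possibly filtered queryset."""
    if has_query_params:
        detail = f"No {record_name} records found matching the provided filters."
        return {"detail": detail}, 404
    return {"detail": f"No {record_name} records found."}, 200


if __name__ == "__main__":
    assert empty_list_response("Weight", has_query_params=True)[1] == 404
    assert empty_list_response("Culling", has_query_params=False)[1] == 200
```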
facebookresearch/chat2map-official | chat2map/mapping/passive_mapping/policy.py | [
{
"identifier": "VisualEnc",
"path": "chat2map/mapping/mapping_models/visual_cnn.py",
"snippet": "class VisualEnc(nn.Module):\n \"\"\"Visual encoder\"\"\"\n\n def __init__(self, cfg=None):\n \"\"\"Takes in RGB images and 90 degree FoV local egocentric map inputs and encodes them\"\"\"\n super().__init__()\n\n passive_mapping_cfg = cfg.PassiveMapping\n sim_cfg = cfg.TASK_CONFIG.SIMULATOR\n\n assert \"RGB_SENSOR\" in cfg.SENSORS\n\n self._n_inputMap_channels = sim_cfg.EGO_LOCAL_OCC_MAP.NUM_CHANNELS\n\n self._num_out_channels = passive_mapping_cfg.VisualEnc.num_out_channels\n assert passive_mapping_cfg.MemoryNet.Transformer.input_size == 2 * self._num_out_channels\n\n cnn_layers = [\n conv_block(self._n_inputMap_channels, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 64, norm_layer= nn.BatchNorm2d),\n conv_block(64, 128, padding=(2, 2), norm_layer=nn.BatchNorm2d),\n conv_block(128, 256, (3, 3), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),\n conv_block(256, self._num_out_channels, (3, 3), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d)\n ]\n self.cnn = nn.Sequential(*cnn_layers)\n\n for module in self.cnn:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"leaky_relu\", 0.2)\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n rgb_cnn_layers = [\n conv_block(3, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 128, norm_layer=nn.BatchNorm2d),\n conv_block(128, 256, norm_layer=nn.BatchNorm2d),\n conv_block(256, self._num_out_channels, norm_layer=nn.BatchNorm2d),\n ]\n self.rgb_cnn = nn.Sequential(*rgb_cnn_layers)\n\n for module in self.rgb_cnn:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"leaky_relu\", 0.2)\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n @property\n def is_blind(self):\n return False\n\n @property\n def n_out_feats(self):\n return 16 * 512\n\n def _preprocess_rgb(self, rgb_observations):\n return rgb_observations\n\n def forward(self, observations,):\n \"\"\"Given RGB imags and 90 degree FoV egocentric local occupancy maps, produces visual features\"\"\"\n assert \"occ_map\" in observations\n occMap_observations = observations[\"occ_map\"]\n occMap_observations = occMap_observations.permute(0, 3, 1, 2)\n\n occMap_out = self.cnn(occMap_observations)\n\n assert \"rgb\" in observations\n rgb_observations = observations[\"rgb\"]\n # permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]\n rgb_observations = rgb_observations.permute(0, 3, 1, 2)\n rgb_observations = rgb_observations.float() / 255.0 # normalize RGB\n rgb_observations = self._preprocess_rgb(rgb_observations)\n\n rgb_out = self.rgb_cnn(rgb_observations)\n\n out = torch.cat([occMap_out, rgb_out], dim=1)\n\n return out"
},
{
"identifier": "OccMapDec",
"path": "chat2map/mapping/mapping_models/visual_cnn.py",
"snippet": "class OccMapDec(nn.Module):\n \"\"\"Occupancy map decoder\"\"\"\n\n def __init__(self, passive_mapping_cfg, sim_cfg,):\n \"\"\"Takes in feature outputs of the transformer decoder and predicts estimates of 360 degree FoV local\n egocentric occupancy map targets\"\"\"\n super().__init__()\n\n self._passive_mapping_cfg = passive_mapping_cfg\n self._glob_can_occ_map_ego_crop_cfg = sim_cfg.GT_GLOBAL_CANONICAL_OCC_MAP_EGO_CROP\n\n assert self._glob_can_occ_map_ego_crop_cfg.SIZE in [64, 80, 96, 128]\n\n assert passive_mapping_cfg.MemoryNet.type == \"transformer\"\n\n assert passive_mapping_cfg.MemoryNet.Transformer.decoder_out_size == 1024\n self._n_inputMapFeat_channels = 1024\n self._inputFeat_h = 4\n self._inputFeat_w = 4\n self._input_feat_size = self._n_inputMapFeat_channels * self._inputFeat_h * self._inputFeat_w\n\n if self._glob_can_occ_map_ego_crop_cfg.SIZE == 64:\n self.dec_cnn = nn.Sequential(\n convT_block(1024, 64 * 8, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),\n padding=(1, 1), outermost=True, use_sigmoid=True,),\n )\n elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 80:\n self.dec_cnn = nn.Sequential(\n conv_block(1024, 64 * 8, kernel_size=(2, 2), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 8, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),\n padding=(1, 1), outermost=True, use_sigmoid=True,),\n )\n elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 96:\n self.dec_cnn = nn.Sequential(\n conv_block(1024, 64 * 8, kernel_size=(1, 1), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 8, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),\n padding=(1, 1), outermost=True, use_sigmoid=True,),\n )\n elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 128:\n self.dec_cnn = nn.Sequential(\n convT_block(1024, 64 * 8, norm_layer=nn.BatchNorm2d), \n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS,\n outermost=True, use_sigmoid=True,),\n )\n else:\n raise NotImplementedError\n\n self.layer_init()\n\n def layer_init(self):\n for module in self.dec_cnn:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"relu\")\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n def forward(self, observations,):\n \"\"\"Given feature outputs of the transformer memory decoder, computes estimates of the 360 degree FoV local\n egocentric target 
occupancy maps\"\"\"\n assert \"memory_outFeats\" in observations\n memory_outFeats = observations[\"memory_outFeats\"]\n assert len(memory_outFeats.size()) == 2\n assert memory_outFeats.size(1) == self._input_feat_size\n memory_outFeats =\\\n memory_outFeats.reshape((memory_outFeats.size(0),\n self._inputFeat_h,\n self._inputFeat_w,\n -1))\n memory_outFeats = memory_outFeats.permute((0, 3, 1, 2))\n\n out = self.dec_cnn(memory_outFeats)\n\n assert len(out.size()) == 4\n # permute tensor to dimension [BATCH x HEIGHT x WIDTH x CHANNEL]\n out = out.permute(0, 2, 3, 1)\n\n return out"
},
{
"identifier": "AudioEnc",
"path": "chat2map/mapping/mapping_models/audio_cnn.py",
"snippet": "class AudioEnc(nn.Module):\n \"\"\"Audio encoder\"\"\"\n\n def __init__(self, cfg,):\n \"\"\"Transforms the spatial audio into spectrograms and computes their features\"\"\"\n super().__init__()\n\n self._passive_mapping_cfg = cfg.PassiveMapping\n self._task_cfg = cfg.TASK_CONFIG\n self._env_cfg = self._task_cfg.ENVIRONMENT\n\n self._sim_cfg = self._task_cfg.SIMULATOR\n self._audio_cfg = self._sim_cfg.AUDIO\n\n audioEnc_cfg = self._passive_mapping_cfg.AudioEnc\n\n self._n_input_channels = audioEnc_cfg.num_input_channels\n\n self.stft_model = torchaudio.transforms.Spectrogram(\n n_fft=self._audio_cfg.N_FFT,\n win_length=self._audio_cfg.WIN_LENGTH,\n hop_length=self._audio_cfg.HOP_LENGTH,\n power=2,\n )\n\n self.model = nn.Sequential(\n conv_block(self._n_input_channels, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 64, (8, 8), stride=(4, 4), padding=(2, 2), norm_layer=nn.BatchNorm2d),\n conv_block(64, 128, norm_layer=nn.BatchNorm2d),\n conv_block(128, 256, norm_layer=nn.BatchNorm2d),\n conv_block(256, self._passive_mapping_cfg.MemoryNet.Transformer.input_size, norm_layer=nn.BatchNorm2d),\n )\n\n for module in self.model:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"leaky_relu\", 0.2)\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n @property\n def n_out_feats(self):\n return 1024\n\n def forward(self, observations):\n \"\"\"Given the audio waveforms, transforms them into spectrograms and computes their features\"\"\"\n assert \"audio\" in observations\n audio_wavs = observations[\"audio\"]\n audio_wavs = audio_wavs.permute(0, 2, 1)\n\n B = audio_wavs.size(0)\n n_channels = audio_wavs.size(1)\n\n audio_mag_spects = self.stft_model(audio_wavs.reshape(audio_wavs.size(0) * audio_wavs.size(1), -1)).pow(0.5)\n audio_mag_spects = audio_mag_spects.reshape(B, n_channels, *audio_mag_spects.size()[1:])\n\n out = self.model(audio_mag_spects)\n assert out.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0]\n assert out.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]\n\n return out"
},
{
"identifier": "ModalityTagTypeNet",
"path": "chat2map/mapping/mapping_models/modality_tag_type_net.py",
"snippet": "class ModalityTagTypeNet(nn.Module):\n \"\"\"Takes the modality type tag for a certain modality and produces its embeddings\"\"\"\n\n def __init__(self, n_modality_tag_types, passive_mapping_cfg,):\n \"\"\"\n Creates an instance of the class that takes the modality type tag for a certain modality and produces its\n embeddings\n :param n_modality_tag_types: number of modality tag types\n :param passive_mapping_cfg: passive mapping config\n \"\"\"\n\n super().__init__()\n self._positional_net_cfg = passive_mapping_cfg.PositionalNet\n\n self._out_h = self._positional_net_cfg.patch_hwCh[0]\n self._out_w = self._positional_net_cfg.patch_hwCh[1]\n self._n_out_ch = self._positional_net_cfg.patch_hwCh[2]\n\n assert self._n_out_ch == passive_mapping_cfg.modality_tag_type_encoding_size, print(self._n_out_ch,\n passive_mapping_cfg.modality_tag_type_encoding_size)\n self.modality_tag_type_lookup_dict = nn.Embedding(n_modality_tag_types,\n passive_mapping_cfg.modality_tag_type_encoding_size,)\n\n def forward(self, x):\n \"\"\"Given the modality type tag, computes the modality embeddings\"\"\"\n out = self.modality_tag_type_lookup_dict(x)\n out = out.unsqueeze(-1).unsqueeze(-1)\n out = out.repeat((1, 1, self._out_h, self._out_w))\n return out"
},
{
"identifier": "PositionalNet",
"path": "chat2map/mapping/mapping_models/positional_net.py",
"snippet": "class PositionalNet(nn.Module):\n \"\"\"\n Takes in positional attributes and produces and produces their embeddings\n \"\"\"\n\n def __init__(self, passive_mapping_cfg,):\n \"\"\"\n Creates an instance of the class to take in positional attributes and produces and produces their embeddings\n :param passive_mapping_cfg: passive mapping config\n \"\"\"\n super().__init__()\n self._passive_mapping_cfg = passive_mapping_cfg\n self._positional_net_cfg = passive_mapping_cfg.PositionalNet\n\n self._n_positional_obs = 5\n\n # source: 1. https://github.com/jalammar/jalammar.github.io/blob/master/notebookes/transformer/transformer_positional_encoding_graph.ipynb\n # 2. https://towardsdatascience.com/master-positional-encoding-part-i-63c05d90a0c3\n self._freqs = MIN_FREQ ** (2 * (torch.arange(self._positional_net_cfg.num_freqs_for_sinusoidal,\n dtype=torch.float32) // 2) /\n self._positional_net_cfg.num_freqs_for_sinusoidal)\n\n assert passive_mapping_cfg.MemoryNet.Transformer.input_size == self._positional_net_cfg.patch_hwCh[2]\n self._n_out_feats = self._positional_net_cfg.patch_hwCh[2]\n\n self._positional_linear = nn.Sequential(\n nn.Linear(self._positional_net_cfg.num_freqs_for_sinusoidal * self._n_positional_obs,\n self._n_out_feats,\n bias=False),\n )\n\n @property\n def n_out_feats(self):\n return self._n_out_feats\n\n def forward(self, observations):\n \"\"\"given the positional observations, computes the positional embeddings\"\"\"\n\n positional_obs = observations[\"positional_obs\"]\n assert len(positional_obs.size()) == 2\n assert positional_obs.size(-1) == self._n_positional_obs\n\n freqs = self._freqs.unsqueeze(0).repeat((positional_obs.size(0), 1)).to(positional_obs.device)\n\n positional_net_out = []\n for positional_obs_idx in range(self._n_positional_obs):\n positional_obs_thisIdx = positional_obs[:, positional_obs_idx].unsqueeze(-1)\n positional_obs_thisIdx = positional_obs_thisIdx * freqs\n positional_obs_thisIdxClone = positional_obs_thisIdx.clone()\n positional_obs_thisIdxClone[..., ::2] = torch.cos(positional_obs_thisIdx[..., ::2])\n positional_obs_thisIdxClone[..., 1::2] = torch.sin(positional_obs_thisIdx[..., 1::2])\n\n positional_net_out.append(positional_obs_thisIdxClone)\n\n positional_net_out = torch.cat(positional_net_out, dim=-1)\n\n assert len(positional_net_out.size()) == 2\n assert positional_net_out.size(0) == positional_obs.size(0)\n assert positional_net_out.size(1) == (self._freqs.size(0) * self._n_positional_obs)\n\n positional_net_out = self._positional_linear(positional_net_out)\n positional_net_out = positional_net_out.unsqueeze(-1).unsqueeze(-1)\n positional_net_out = positional_net_out.repeat(\n (1,\n 1,\n self._positional_net_cfg.patch_hwCh[0],\n self._positional_net_cfg.patch_hwCh[1])\n )\n\n return positional_net_out"
},
{
"identifier": "PatchPositionalNet",
"path": "chat2map/mapping/mapping_models/positional_net.py",
"snippet": "class PatchPositionalNet(nn.Module):\n \"\"\"Takes in the positions of the feats corresponding to contiguous patches in an image or an audio spectrogram\n in the rasterized order and produces their embeddings\"\"\"\n\n def __init__(self, passive_mapping_cfg,):\n \"\"\"\n Creates an instance of the class that takes in the positions of the feats corresponding to contiguous patches\n in an image or an audio spectrogram in the rasterized order and produces their embeddings\n :param passive_mapping_cfg: passive mapping config\n \"\"\"\n\n super().__init__()\n self._passive_mapping_cfg = passive_mapping_cfg\n self._positional_net_cfg = passive_mapping_cfg.PositionalNet\n\n self._n_positional_obs = 1\n self._n_out_feats = self._positional_net_cfg.patch_hwCh[2]\n\n # source: 1. https://github.com/jalammar/jalammar.github.io/blob/master/notebookes/transformer/transformer_positional_encoding_graph.ipynb\n # 2. https://towardsdatascience.com/master-positional-encoding-part-i-63c05d90a0c3\n self._freqs = MIN_FREQ ** (2 * (torch.arange(self._positional_net_cfg.num_freqs_for_sinusoidal,\n dtype=torch.float32) // 2) /\n self._positional_net_cfg.num_freqs_for_sinusoidal)\n\n self._patch_positional_conv = nn.Sequential(\n nn.Conv2d(self._positional_net_cfg.num_freqs_for_sinusoidal *self._n_positional_obs,\n self._n_out_feats,\n kernel_size=1,\n bias=False),\n )\n\n positional_net_out = []\n for i in range(self._positional_net_cfg.patch_hwCh[0]):\n positional_net_out_thisRow = []\n for j in range(self._positional_net_cfg.patch_hwCh[1]):\n raster_idx = i * self._positional_net_cfg.patch_hwCh[1] + j\n\n positional_obs_thisIdx = raster_idx * self._freqs\n positional_obs_thisIdxClone = positional_obs_thisIdx.clone()\n\n positional_obs_thisIdxClone[..., ::2] = torch.cos(positional_obs_thisIdxClone[..., ::2])\n positional_obs_thisIdxClone[..., 1::2] = torch.sin(positional_obs_thisIdxClone[..., 1::2])\n\n positional_net_out_thisRow.append(positional_obs_thisIdxClone)\n\n positional_net_out.append(torch.stack(positional_net_out_thisRow, dim=0))\n\n positional_net_out = torch.stack(positional_net_out, dim=0).permute((2, 0, 1))\n self._positional_net_out = positional_net_out\n\n assert self._n_out_feats == passive_mapping_cfg.MemoryNet.Transformer.input_size\n\n @property\n def n_out_feats(self):\n return self._n_out_feats\n\n def forward(self, observations):\n positional_obs = observations[\"positional_obs\"]\n positional_net_out = self._positional_net_out.unsqueeze(0).repeat((positional_obs.size(0), 1, 1, 1))\\\n .to(positional_obs.device)\n\n positional_net_out = self._patch_positional_conv(positional_net_out)\n\n return positional_net_out"
},
{
"identifier": "FusionNet",
"path": "chat2map/mapping/mapping_models/fusion_net.py",
"snippet": "class FusionNet(nn.Module):\n \"\"\"Network to fuse modality features, positional embeddings and modality type tag embeddings\"\"\"\n\n def __init__(self,):\n super().__init__()\n\n def forward(self, observations):\n \"\"\"fuses given different features\"\"\"\n for observation_idx, observation in enumerate(observations):\n if observation_idx == 0:\n out = observation\n else:\n out = out + observation\n\n return out"
},
{
"identifier": "TransformerMemory",
"path": "chat2map/mapping/mapping_models/memory_net.py",
"snippet": "class TransformerMemory(nn.Module):\n \"\"\"Transformer memory\"\"\"\n def __init__(self, cfg):\n \"\"\"Creates an instance of the transformer memory\"\"\"\n super().__init__()\n\n self._cfg = cfg\n\n self._passive_mapping_cfg = cfg.PassiveMapping\n self._transformer_cfg = self._passive_mapping_cfg.MemoryNet.Transformer\n self._task_cfg = cfg.TASK_CONFIG\n self._env_cfg = self._task_cfg.ENVIRONMENT\n self._sim_cfg = self._task_cfg.SIMULATOR\n\n self.transformer = TransformerWoSelfAttnInDecoder(\n d_model=self._transformer_cfg.input_size,\n nhead=self._transformer_cfg.nhead,\n num_encoder_layers=self._transformer_cfg.num_encoder_layers,\n num_decoder_layers=self._transformer_cfg.num_decoder_layers,\n dim_feedforward=self._transformer_cfg.hidden_size,\n dropout=self._transformer_cfg.dropout,\n activation=self._transformer_cfg.activation,\n d_model_out=self._transformer_cfg.decoder_out_size,\n )\n\n context_length_multiplier = 3\n context_length_multiplier *= self._sim_cfg.ALL_AGENTS.NUM\n context_length_multiplier *= (self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\\\n self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])\n\n query_length_multiplier = self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\\\n self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]\n\n self._src_mask = self._convert_attn_masks_to_transformer_format(\n torch.ones((self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,\n self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,))\n )\n self._mem_mask = self._convert_attn_masks_to_transformer_format(\n torch.ones((self._env_cfg.MAX_QUERY_LENGTH * query_length_multiplier,\n self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,))\n )\n\n self._tgt_mask = self._convert_attn_masks_to_transformer_format(\n torch.eye(self._env_cfg.MAX_QUERY_LENGTH * query_length_multiplier)\n )\n\n def _convert_key_padding_masks_to_transformer_format(self, key_padding_masks):\n r\"\"\"The key_padding_masks is a FloatTensor with\n - 0 for invalid locations, and\n - 1 for valid locations.\n The required format is a BoolTensor with\n - True for invalid locations, and\n - False for valid locations\n\n source:\n - https://pytorch.org/docs/1.4.0/_modules/torch/nn/modules/transformer.html#TransformerDecoder\n - https://discuss.pytorch.org/t/how-to-add-padding-mask-to-nn-transformerencoder-module/63390/3\n \"\"\"\n return (1 - key_padding_masks) > 0\n\n def _convert_attn_masks_to_transformer_format(self, attn_masks):\n r\"\"\"The attn_masks is a FloatTensor with\n - 0 for invalid locations, and\n - 1 for valid locations.\n The required format is a FloatTensor with\n - float('-inf') for invalid locations, and\n - 0. 
for valid locations\n\n source:\n - https://pytorch.org/docs/1.4.0/_modules/torch/nn/modules/transformer.html#TransformerDecoder\n - https://discuss.pytorch.org/t/how-to-add-padding-mask-to-nn-transformerencoder-module/63390/3\n \"\"\"\n return attn_masks.float().masked_fill(attn_masks == 0, float('-inf')).masked_fill(attn_masks == 1, float(0.0))\n\n def forward(self, observations):\n \"\"\"computes transformer memory features given observations\"\"\"\n assert \"src_feats\" in observations\n src_feats = observations[\"src_feats\"]\n\n assert \"tgt_feats\" in observations\n tgt_feats = observations[\"tgt_feats\"]\n\n \"\"\"how masks works -- source: https://github.com/pytorch/pytorch/blob/7f73f1d591afba823daa4a99a939217fb54d7688/torch/nn/functional.py#L3360\"\"\"\n assert \"src_key_padding_mask\" in observations\n src_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations[\"src_key_padding_mask\"])\n\n assert \"tgt_key_padding_mask\" in observations\n tgt_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations[\"tgt_key_padding_mask\"])\n\n assert \"memory_key_padding_mask\" in observations\n memory_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations[\"memory_key_padding_mask\"])\n\n self._src_mask = self._src_mask.to(src_feats.device)\n self._mem_mask = self._mem_mask.to(memory_key_padding_mask.device)\n self._tgt_mask = self._tgt_mask.to(tgt_feats.device)\n\n out = self.transformer(\n src_feats,\n tgt_feats,\n src_mask=self._src_mask,\n tgt_mask=self._tgt_mask,\n memory_mask=self._mem_mask,\n src_key_padding_mask=src_key_padding_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n )\n\n return out"
}
] | import os
import pickle
import math
import numpy as np
import torch
import torch.nn as nn
from torchsummary import summary
from chat2map.mapping.mapping_models.visual_cnn import VisualEnc, OccMapDec
from chat2map.mapping.mapping_models.audio_cnn import AudioEnc
from chat2map.mapping.mapping_models.modality_tag_type_net import ModalityTagTypeNet
from chat2map.mapping.mapping_models.positional_net import PositionalNet, PatchPositionalNet
from chat2map.mapping.mapping_models.fusion_net import FusionNet
from chat2map.mapping.mapping_models.memory_net import TransformerMemory | 10,187 | context_key_padding_mask = torch.cat(context_key_padding_mask, dim=-1)
memory_key_padding_mask = context_key_padding_mask.clone()
# --------------------------------------------- query encoding --------------------------------------------------
query_feats = []
"""pose encoder"""
assert "query_views_pose" in observations
query_views_pose = observations["query_views_pose"]
# B x max_query_length x ... -> (B * max_query_length) x ...; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
query_views_pose = query_views_pose.reshape((-1, *query_views_pose.size()[2:]))
query_views_poseFeats = self.pose_net({"positional_obs": query_views_pose})
query_feats.append(query_views_poseFeats)
query_views_posePatchFeats = self.patchPose_net({"positional_obs": query_views_pose})
query_feats.append(query_views_posePatchFeats)
"""fusion net"""
query_fusedFeats = self.fusion_net(query_feats)
query_fusedFeats = query_fusedFeats.permute((0, 2, 3, 1))
query_fusedFeats = query_fusedFeats.reshape((B,
self.max_query_length,
query_fusedFeats.size(1),
query_fusedFeats.size(2),
query_fusedFeats.size(3)))
assert query_fusedFeats.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0]
assert query_fusedFeats.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]
query_fusedFeats = query_fusedFeats.reshape((B,
self.max_query_length *\
query_fusedFeats.size(2) *\
query_fusedFeats.size(3),
-1))
# B x max_query_length x ... -> max_query_length x B x -1; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
query_fusedFeats = query_fusedFeats.permute(1, 0, 2)
"""query key padding mask"""
assert "query_views_mask" in observations
query_key_padding_mask = observations["query_views_mask"]
assert len(query_key_padding_mask.size()) == 2
query_key_padding_mask = query_key_padding_mask.unsqueeze(-1).unsqueeze(-1)
query_key_padding_mask = query_key_padding_mask.repeat((1,
1,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0],
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]))
query_key_padding_mask = query_key_padding_mask.reshape((query_key_padding_mask.size(0),
query_key_padding_mask.size(1) *\
query_key_padding_mask.size(2) *\
query_key_padding_mask.size(3)))
"""memory encoding: context aggregation"""
memory_outFeats =\
self.memory_net(
{
"src_feats": context_fusedFeats,
"tgt_feats": query_fusedFeats,
"src_key_padding_mask": context_key_padding_mask,
"tgt_key_padding_mask": query_key_padding_mask,
"memory_key_padding_mask": memory_key_padding_mask,
}
)
# max_query_length x B x ... -> B x max_query_length x ...; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
memory_outFeats = memory_outFeats.permute(1, 0, 2)
memory_outFeats = memory_outFeats.reshape((B,
self.max_query_length,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0],
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1],
memory_outFeats.size(2)))
memory_outFeats = memory_outFeats.reshape((B * self.max_query_length,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1] *\
memory_outFeats.size(4)))
"""query occMap decoder"""
query_occMap_pred = self.query_occMap_dec({"memory_outFeats": memory_outFeats})
# (B * max_query_length) x ... -> B x max_query_length x ...; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
query_occMap_pred = query_occMap_pred.reshape((B,
self.max_query_length,
*query_occMap_pred.size()[1:]))
return query_occMap_pred
class PassiveMappingPolicy(Policy):
"""
Model for passive mapping
"""
def __init__(
self,
cfg,
):
passive_mapping_cfg = cfg.PassiveMapping
task_cfg = cfg.TASK_CONFIG
sim_cfg = task_cfg.SIMULATOR
# --------------------------------------------- context encoders -----------------------------------------------
"""pose net"""
pose_net = PositionalNet(
passive_mapping_cfg=passive_mapping_cfg,
)
patchPose_net = PatchPositionalNet(
passive_mapping_cfg=passive_mapping_cfg,
)
"""modality tag type lookup table"""
modality_tag_type_lookup_dict = ModalityTagTypeNet(
n_modality_tag_types=3,
passive_mapping_cfg=passive_mapping_cfg,
)
"""views encoder"""
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Policy(nn.Module):
"""
Parent class of model for passive mapping
"""
def __init__(self,
context_views_enc,
context_audio_enc,
pose_net,
patchPose_net,
modality_tag_type_lookup_dict,
fusion_net,
memory_net,
query_occMap_dec,
cfg
):
"""Given the audio streams and sampled frames during a conversation, the model predicts estimates of target
occupancy maps"""
super().__init__()
self.context_views_enc = context_views_enc
self.context_audio_enc = context_audio_enc
self.pose_net = pose_net
self.patchPose_net = patchPose_net
self.modality_tag_type_lookup_dict = modality_tag_type_lookup_dict
self.fusion_net = fusion_net
self.memory_net = memory_net
self.query_occMap_dec = query_occMap_dec
self._cfg = cfg
self._task_cfg = cfg.TASK_CONFIG
self._env_cfg = self._task_cfg.ENVIRONMENT
self._sim_cfg = self._task_cfg.SIMULATOR
self._audio_cfg = self._sim_cfg.AUDIO
self._passive_mapping_cfg = cfg.PassiveMapping
self.max_context_length = self._env_cfg.MAX_CONTEXT_LENGTH
self.max_query_length = self._env_cfg.MAX_QUERY_LENGTH
def forward(self, observations):
"""Given the audio streams and sampled frames during a conversation, predicts estimates of target
occupancy maps"""
# --------------------------------------------- context encoding ------------------------------------------------
context_feats = []
for feat_idx in range(3):
context_feats.append([])
context_key_padding_mask = []
"""views encoder"""
assert "context_maps" in observations
context_maps = observations["context_maps"]
assert "context_views_pose" in observations
context_views_pose = observations["context_views_pose"]
assert "context_views_mask" in observations
context_views_mask = observations["context_views_mask"]
assert len(context_views_mask.size()) == 3
B = context_maps.size(0)
num_agents = context_maps.size(1)
context_maps = context_maps.reshape((-1, *context_maps.size()[3:]))
context_views_dct = {"occ_map": context_maps}
if "RGB_SENSOR" in self._cfg.SENSORS:
assert "context_rgbs" in observations
context_rgbs = observations["context_rgbs"]
context_rgbs = context_rgbs.reshape((-1, *context_rgbs.size()[3:]))
context_views_dct["rgb"] = context_rgbs
context_views_feats = self.context_views_enc(context_views_dct)
context_feats[0].append(context_views_feats)
# B x num_agents x max_context_length x ... -> (B * num_agents * max_context_length) x ...; B: batch size,
# max_context_length: transformer source sequence length S (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
context_views_pose = context_views_pose.reshape((-1, *context_views_pose.size()[3:]))
context_views_poseFeats = self.pose_net({"positional_obs": context_views_pose})
context_feats[0].append(context_views_poseFeats)
context_views_posePatchFeats = self.patchPose_net({"positional_obs": context_views_pose})
context_feats[0].append(context_views_posePatchFeats)
context_views_modalityType = torch.LongTensor([0]).to(context_views_poseFeats.device)
context_views_modalityTypeFeats = self.modality_tag_type_lookup_dict(context_views_modalityType)
context_views_modalityTypeFeats =\
context_views_modalityTypeFeats.repeat((context_views_posePatchFeats.size(0), 1, 1, 1))
context_feats[0].append(context_views_modalityTypeFeats)
# B x num_agents x max_context_length -> B x (num_agents * max_context_length); B: batch size,
context_views_mask = context_views_mask.reshape((context_views_mask.size(0), -1))
context_views_mask = context_views_mask.unsqueeze(-1).unsqueeze(-1)
context_views_mask = context_views_mask.repeat((1,
1,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0],
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]))
context_views_mask = context_views_mask.reshape((context_views_mask.size(0),
context_views_mask.size(1) *\
context_views_mask.size(2) *\
context_views_mask.size(3)))
context_key_padding_mask.append(context_views_mask)
"""self audio encoder"""
assert "context_selfAudio" in observations
context_selfAudio = observations["context_selfAudio"]
assert "context_selfAudio_pose" in observations
context_selfAudio_pose = observations["context_selfAudio_pose"]
assert "context_selfAudio_mask" in observations
context_selfAudio_mask = observations["context_selfAudio_mask"]
assert len(context_selfAudio_mask.size()) == 3
assert "context_otherAudio" in observations
context_otherAudio = observations["context_otherAudio"]
context_selfAudio = context_selfAudio.reshape((-1, *context_selfAudio.size()[3:]))
context_otherAudio = context_otherAudio.reshape((-1, *context_otherAudio.size()[3:]))
context_audio = torch.cat([context_selfAudio, context_otherAudio], dim=0)
context_audio_feats = self.context_audio_enc({"audio": context_audio})
context_selfAudio_feats = context_audio_feats[:context_selfAudio.size(0)]
context_feats[1].append(context_selfAudio_feats)
# B x num_agents x max_context_length x ... -> (B * num_agents * max_context_length) x ...; B: batch size,
# max_context_length: transformer source sequence length S (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
context_selfAudio_pose = context_selfAudio_pose.reshape((-1, *context_selfAudio_pose.size()[3:]))
context_selfAudio_poseFeats = self.pose_net({"positional_obs": context_selfAudio_pose})
context_feats[1].append(context_selfAudio_poseFeats)
context_selfAudio_posePatchFeats = self.patchPose_net({"positional_obs": context_selfAudio_pose})
context_feats[1].append(context_selfAudio_posePatchFeats)
context_selfAudio_modalityType = torch.LongTensor([1]).to(context_selfAudio_poseFeats.device)
context_selfAudio_modalityTypeFeats = self.modality_tag_type_lookup_dict(context_selfAudio_modalityType)
context_selfAudio_modalityTypeFeats =\
context_selfAudio_modalityTypeFeats.repeat((context_selfAudio_modalityTypeFeats.size(0), 1, 1, 1))
context_feats[1].append(context_selfAudio_modalityTypeFeats)
# B x num_agents x max_context_length -> B x (num_agents * max_context_length); B: batch size,
context_selfAudio_mask = context_selfAudio_mask.reshape((context_selfAudio_mask.size(0), -1))
context_selfAudio_mask = context_selfAudio_mask.unsqueeze(-1).unsqueeze(-1)
context_selfAudio_mask = context_selfAudio_mask.repeat((1,
1,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0],
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]))
context_selfAudio_mask = context_selfAudio_mask.reshape((context_selfAudio_mask.size(0),
context_selfAudio_mask.size(1) *\
context_selfAudio_mask.size(2) *\
context_selfAudio_mask.size(3)))
context_key_padding_mask.append(context_selfAudio_mask)
"""audio from other ego encoder"""
context_otherAudio_feats = context_audio_feats[context_otherAudio.size(0):]
assert "context_otherAudio_pose" in observations
context_otherAudio_pose = observations["context_otherAudio_pose"]
assert "context_otherAudio_mask" in observations
context_otherAudio_mask = observations["context_otherAudio_mask"]
assert len(context_otherAudio_mask.size()) == 3
context_feats[2].append(context_otherAudio_feats)
# B x num_agents x max_context_length x ... -> (B * num_agents * max_context_length) x ...; B: batch size,
# max_context_length: transformer source sequence length S (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
context_otherAudio_pose = context_otherAudio_pose.reshape((-1, *context_otherAudio_pose.size()[3:]))
context_otherAudio_poseFeats = self.pose_net({"positional_obs": context_otherAudio_pose})
context_feats[2].append(context_otherAudio_poseFeats)
context_otherAudio_posePatchFeats = self.patchPose_net({"positional_obs": context_otherAudio_pose})
context_feats[2].append(context_otherAudio_posePatchFeats)
context_otherAudio_modalityType =\
torch.LongTensor([2]).to(context_otherAudio_poseFeats.device)
context_otherAudio_modalityTypeFeats = self.modality_tag_type_lookup_dict(context_otherAudio_modalityType)
context_otherAudio_modalityTypeFeats =\
context_otherAudio_modalityTypeFeats.repeat((context_otherAudio_modalityTypeFeats.size(0), 1, 1, 1))
context_feats[2].append(context_otherAudio_modalityTypeFeats)
# B x num_agents x max_context_length -> B x (num_agents * max_context_length); B: batch size,
context_otherAudio_mask = context_otherAudio_mask.reshape((context_otherAudio_mask.size(0), -1))
context_otherAudio_mask = context_otherAudio_mask.unsqueeze(-1).unsqueeze(-1)
context_otherAudio_mask = context_otherAudio_mask.repeat((1,
1,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0],
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]))
context_otherAudio_mask = context_otherAudio_mask.reshape((context_otherAudio_mask.size(0),
context_otherAudio_mask.size(1) *\
context_otherAudio_mask.size(2) *\
context_otherAudio_mask.size(3)))
context_key_padding_mask.append(context_otherAudio_mask)
"""fusion net"""
context_fusedFeats = []
for idx_contextFeats in range(len(context_feats)):
temp_context_fusedFeats = self.fusion_net(context_feats[idx_contextFeats])
temp_context_fusedFeats = temp_context_fusedFeats.permute((0, 2, 3, 1))
temp_context_fusedFeats = temp_context_fusedFeats.reshape((B,
num_agents * self.max_context_length,
temp_context_fusedFeats.size(1),
temp_context_fusedFeats.size(2),
temp_context_fusedFeats.size(3)))
assert temp_context_fusedFeats.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0]
assert temp_context_fusedFeats.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]
temp_context_fusedFeats = temp_context_fusedFeats.reshape((B,
num_agents * self.max_context_length *\
temp_context_fusedFeats.size(2) *\
temp_context_fusedFeats.size(3),
-1))
temp_context_fusedFeats = temp_context_fusedFeats.permute(1, 0, 2)
context_fusedFeats.append(temp_context_fusedFeats)
context_fusedFeats = torch.cat(context_fusedFeats, dim=0)
"""context and memory key padding masks"""
context_key_padding_mask = torch.cat(context_key_padding_mask, dim=-1)
memory_key_padding_mask = context_key_padding_mask.clone()
# --------------------------------------------- query encoding --------------------------------------------------
query_feats = []
"""pose encoder"""
assert "query_views_pose" in observations
query_views_pose = observations["query_views_pose"]
# B x max_query_length x ... -> (B * max_query_length) x ...; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
query_views_pose = query_views_pose.reshape((-1, *query_views_pose.size()[2:]))
query_views_poseFeats = self.pose_net({"positional_obs": query_views_pose})
query_feats.append(query_views_poseFeats)
query_views_posePatchFeats = self.patchPose_net({"positional_obs": query_views_pose})
query_feats.append(query_views_posePatchFeats)
"""fusion net"""
query_fusedFeats = self.fusion_net(query_feats)
query_fusedFeats = query_fusedFeats.permute((0, 2, 3, 1))
query_fusedFeats = query_fusedFeats.reshape((B,
self.max_query_length,
query_fusedFeats.size(1),
query_fusedFeats.size(2),
query_fusedFeats.size(3)))
assert query_fusedFeats.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0]
assert query_fusedFeats.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]
query_fusedFeats = query_fusedFeats.reshape((B,
self.max_query_length *\
query_fusedFeats.size(2) *\
query_fusedFeats.size(3),
-1))
# B x max_query_length x ... -> max_query_length x B x -1; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
query_fusedFeats = query_fusedFeats.permute(1, 0, 2)
"""query key padding mask"""
assert "query_views_mask" in observations
query_key_padding_mask = observations["query_views_mask"]
assert len(query_key_padding_mask.size()) == 2
query_key_padding_mask = query_key_padding_mask.unsqueeze(-1).unsqueeze(-1)
query_key_padding_mask = query_key_padding_mask.repeat((1,
1,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0],
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]))
query_key_padding_mask = query_key_padding_mask.reshape((query_key_padding_mask.size(0),
query_key_padding_mask.size(1) *\
query_key_padding_mask.size(2) *\
query_key_padding_mask.size(3)))
"""memory encoding: context aggregation"""
memory_outFeats =\
self.memory_net(
{
"src_feats": context_fusedFeats,
"tgt_feats": query_fusedFeats,
"src_key_padding_mask": context_key_padding_mask,
"tgt_key_padding_mask": query_key_padding_mask,
"memory_key_padding_mask": memory_key_padding_mask,
}
)
# max_query_length x B x ... -> B x max_query_length x ...; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
memory_outFeats = memory_outFeats.permute(1, 0, 2)
memory_outFeats = memory_outFeats.reshape((B,
self.max_query_length,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0],
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1],
memory_outFeats.size(2)))
memory_outFeats = memory_outFeats.reshape((B * self.max_query_length,
self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1] *\
memory_outFeats.size(4)))
"""query occMap decoder"""
query_occMap_pred = self.query_occMap_dec({"memory_outFeats": memory_outFeats})
# (B * max_query_length) x ... -> B x max_query_length x ...; B: batch size,
# max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer)
query_occMap_pred = query_occMap_pred.reshape((B,
self.max_query_length,
*query_occMap_pred.size()[1:]))
return query_occMap_pred
class PassiveMappingPolicy(Policy):
"""
Model for passive mapping
"""
def __init__(
self,
cfg,
):
passive_mapping_cfg = cfg.PassiveMapping
task_cfg = cfg.TASK_CONFIG
sim_cfg = task_cfg.SIMULATOR
# --------------------------------------------- context encoders -----------------------------------------------
"""pose net"""
pose_net = PositionalNet(
passive_mapping_cfg=passive_mapping_cfg,
)
patchPose_net = PatchPositionalNet(
passive_mapping_cfg=passive_mapping_cfg,
)
"""modality tag type lookup table"""
modality_tag_type_lookup_dict = ModalityTagTypeNet(
n_modality_tag_types=3,
passive_mapping_cfg=passive_mapping_cfg,
)
"""views encoder""" | context_views_enc = VisualEnc( | 0 | 2023-12-06 01:20:37+00:00 | 12k |
PeriniM/Rotary-Pendulum-RL | control/reinforcement_learning/src/main.py | [
{
"identifier": "RealPendulumEnv",
"path": "control/reinforcement_learning/Environments/RealPendulumEnv.py",
"snippet": "class RealPendulumEnv(gym.Env):\n \"\"\"\n Real rotary pendulum with ESP32\n \"\"\"\n\n metadata = {\"render_modes\": [\"human\"]}\n\n def __init__(self, port, baudrate, render_mode=\"human\"):\n super(RealPendulumEnv, self).__init__()\n \"\"\"\n Initialize the environment.\n \n Args:\n port (str): The serial port to connect to.\n baudrate (int): The baudrate to use for the serial connection.\n render_mode (str, optional): The render mode. Defaults to \"human\".\n\n Returns:\n None\n \"\"\"\n\n self.ser = serial.Serial(\n port=port,\n baudrate=baudrate,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n )\n self.reader = SerialReader(self.ser, simulation=False)\n self.reader.start()\n self.render_mode = render_mode\n self.name = \"RealPendulum\"\n self.nbJoint = 1\n self.num_state = 2\n self.action = 0.0\n self.motorAngle = 0.0\n self.terminated = False\n self.truncated = False\n self.iterCount = 0\n self.maxIter = 1000\n self.omega_max = 10.0\n self.range_actions = np.array([-1.0, 1.0])\n self.range_observation = np.array([-1.0, 1.0])\n self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)\n self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)\n # variable to store angles of one episode\n self.episode_angles = []\n \n def reset(self, seed=None, options=None):\n \"\"\"\n Reset the environment to the initial state.\n\n Args:\n None\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n info (dict): Episode information\n \"\"\"\n\n super().reset(seed=seed, options=options)\n\n # Reset the episode angles\n self.episode_angles = []\n\n # Send command to pendulum to go to home position.\n self.send_serial(\"0,1\")\n # Wait for the pendulum to report it has finished resetting.\n while (1):\n self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()\n if not self.terminated:\n break\n\n # Reset iteration count\n self.iterCount = 0\n self.info = {\"episode\": {\"r\": 0.0, \"l\": self.iterCount}}\n\n return self.observation_space.astype(np.float32), self.info\n \n def step(self, action):\n \"\"\"\n Take a step in the environment\n\n Args:\n action (float): Motor speed percentage [-100, 100]\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n reward (float): Reward for the current state\n terminated (bool): Whether the episode is done or not\n truncated (bool): Whether the episode is truncated or not\n info (dict): Episode information\n \"\"\"\n\n # Send action to pendulum over serial\n self.send_serial(f\"{action*100},0\")\n self.action = action\n # Read state and episode done flag from serial\n self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()\n\n # Store the angles of the episode for reward penalty\n self.episode_angles.append(self.state[0])\n \n # Calculate reward\n reward = self.calculate_reward(self.observation_space)\n self.episode_reward += reward\n self.iterCount += 1\n self.reset_policy(self.maxIter)\n self.info = {\"episode\": {\"r\": self.episode_reward, \"l\": self.iterCount}}\n\n return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info\n\n def send_serial(self, command):\n \"\"\"\n Send a command to the pendulum over serial\n\n Args:\n command (str): [motor speed percentage, reset flag]\n\n Returns:\n None\n \"\"\"\n\n 
self.ser.write(f\"{command}\\n\".encode())\n # time.sleep(0.1)\n \n def reset_policy(self, reset_count=200):\n \"\"\"\n Policy to reset the environment\n\n Args:\n reset_count (int, optional): Number of iterations to wait before resetting the system. Defaults to 200.\n \n Returns:\n None\n \"\"\"\n\n if self.iterCount > reset_count:\n self.terminated = True\n \n def calculate_reward(self, state):\n \"\"\"\n Calculate the reward for the current state\n\n Args:\n state (np.array): [bar angle, bar angular velocity]\n\n Returns:\n reward (float): Reward for the current state\n \"\"\"\n\n # Constants to scale the angle and velocity penalties\n ANGLE_WEIGHT = 1.0\n VELOCITY_WEIGHT = 0.1\n MOTOR_ANGLE_WEIGHT = 1.0\n ACTION_WEIGHT = 0.01\n\n # Penalize the angle to be minimized\n angle_penalty = ANGLE_WEIGHT * (state[0] ** 2)\n # Penalize the angular velocity to be minimized\n velocity_penalty = VELOCITY_WEIGHT * (state[1] ** 2)\n\n # Penalize the motor angle to be minimized\n motor_angle = self.motorAngle / 180.0\n motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (motor_angle ** 2)\n\n # Penalize the action to be minimized\n action_penalty = ACTION_WEIGHT * (self.action ** 2)\n\n # Reward is higher when penalties are lower\n reward = -(angle_penalty + velocity_penalty + motor_angle_penalty + action_penalty)\n\n # Penalize the reward if the average angle of the episode is close to pi\n # after 3/4 of the maximum iterations\n if self.iterCount > self.maxIter*3/4:\n if np.abs(np.mean(self.episode_angles)) < (np.pi-0.8):\n reward-=100.0\n # if self.terminated:\n # if self.iterCount < self.maxIter*1/10:\n # reward-=100.0\n return reward\n\n def render(self, camera=False):\n \"\"\"\n Render the state (optional), e.g. display the video stream\n \"\"\"\n if camera:\n print(\"Connect the camera to the pendulum and display the video stream.\")\n\n def close(self):\n \"\"\"\n Close the serial connection\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n self.ser.close()"
},
{
"identifier": "PyBulletPendulumEnv",
"path": "control/reinforcement_learning/Environments/PyBulletPendulumEnv.py",
"snippet": "class PyBulletPendulumEnv(gym.Env):\n \"\"\"\n PyBullet Rotary Pendulum\n \"\"\"\n\n metadata = {\"render_modes\": [\"human\"]}\n\n def __init__(self, render_mode=\"human\"):\n super(PyBulletPendulumEnv, self).__init__()\n \"\"\"\n Initialize the PyBullet Rotary Pendulum environment\n\n Args:\n render (bool, optional): Whether to render the environment. Defaults to True.\n\n Returns:\n None\n \"\"\"\n\n self.render_mode = render_mode\n # Initialize PyBullet\n if render_mode == \"human\":\n self.physicsClient = p.connect(p.GUI)\n else:\n self.physicsClient = p.connect(p.DIRECT)\n\n p.setAdditionalSearchPath(pybullet_data.getDataPath())\n p.setGravity(0, 0, -9.806)\n # move camera to focus on the robot\n p.resetDebugVisualizerCamera(cameraDistance=0.4, cameraYaw=0, cameraPitch=-30, cameraTargetPosition=[0,0,0.1])\n # Load the plane and pendulum URDF\n self.planeId = p.loadURDF(\"plane.urdf\")\n self.load_pendulum_urdf()\n\n # Define other environment parameters\n self.name = \"PyBulletPendulum\"\n self.nbJoint = 1\n self.num_state = 2\n self.action = 0.0\n self.n_actions = 101\n self.range_actions = np.array([-1.0, 1.0])\n self.range_observation = np.array([-1.0, 1.0])\n self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)\n self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)\n self.motorAngle = 0.0\n self.terminated = False\n self.truncated = False\n self.info = {}\n self.iterCount = 0\n self.maxIter = 1500\n self.omega_max = 10.0\n self.episode_reward = 0.0\n \n # variable to store angles of one episode\n self.episode_angles = []\n\n def load_pendulum_urdf(self):\n \"\"\"\n Load the pendulum URDF into the environment.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n cubeStartPos = [0, 0, 0]\n cubeStartOrientation = p.getQuaternionFromEuler([np.pi / 2, 0, 0])\n curr_dir = os.path.abspath(os.path.dirname(__file__))\n robot_urdf = 'Rotary_Pendulum_URDF.urdf'\n # Construct the path to the URDF file\n urdf_path = os.path.join(curr_dir, '..', '..', '..', 'simulation', 'urdf', robot_urdf)\n self.robotId = p.loadURDF(urdf_path, cubeStartPos, cubeStartOrientation,\n # flags=p.URDF_USE_INERTIA_FROM_FILE,\n useFixedBase=True\n )\n\n # Define joint indices as per your URDF structure\n self.motor_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_3')\n self.bar_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_5')\n\n # Define real robot parameters\n self.steps_per_rev = 3200\n self.max_speed_steps_per_sec = 4000.0\n # Calculate radians per step\n self.radians_per_step = (2 * np.pi) / self.steps_per_rev\n # Calculate max speed in radians per second [rad/s]\n self.max_motor_speed = self.max_speed_steps_per_sec * self.radians_per_step\n # Admissible motor angle range [deg]\n self.motor_angle_range = [-150, 150]\n self.out_of_range = False\n\n # Compensation angles for the URDF\n self.motor_compensation_angle = 0.400\n self.bar_compensation_angle = -0.264\n\n def reset(self, seed=None, options=None):\n \"\"\"\n Reset the environment to a random state\n\n Args:\n None\n\n Returns:\n state (np.array): [bar_angle, bar_angular_velocity]\n \"\"\"\n\n super().reset(seed=seed, options=options)\n # Reset the episode angles\n self.episode_angles = []\n self.episode_reward = 0.0\n self.terminated = False\n # Send command to pendulum 
to reset to random position\n self.send_fake_serial([0, 1])\n\n # get the state from the pendulum\n self.observation_space, self.motorAngle, self.terminated = self.get_state()\n\n # Reset iteration count\n self.iterCount = 0\n self.info = {\"episode\": {\"r\": 0.0, \"l\": self.iterCount}}\n \n return self.observation_space.astype(np.float32), self.info\n \n def step(self, action):\n \"\"\"\n Take a step in the environment\n\n Args:\n action (float): Motor speed percentage [-100, 100]\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n \"\"\"\n\n # multiply the action by 100 to get the percentage\n self.action = action*100.0\n # Send action to pendulum over serial\n self.send_fake_serial([self.action, 0])\n # Read state and episode done flag from serial\n self.observation_space, self.motorAngle, self.terminated = self.get_state()\n\n # Store the angles of the episode for reward penalty\n self.episode_angles.append(self.observation_space[0])\n \n # Calculate reward\n reward = self.calculate_reward(self.observation_space)\n self.episode_reward += reward\n self.iterCount += 1\n self.reset_policy(self.maxIter)\n self.info = {\"episode\": {\"r\": self.episode_reward, \"l\": self.iterCount}}\n\n # return normalized_state, reward, self.done\n return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info\n \n def calculate_reward(self, state):\n \"\"\"\n Calculate the reward for the current state\n\n Args:\n state (np.array): [bar angle, bar angular velocity]\n\n Returns:\n reward (float): Reward for the current state\n \"\"\"\n\n # Constants to scale the bar and motor angle penalties\n ANGLE_WEIGHT = 1.0\n VELOCITY_WEIGHT = 0.1\n MOTOR_ANGLE_WEIGHT = 0.001\n ACTION_WEIGHT = 0.001\n\n # Calculate the angle penalty\n angle_penalty = ANGLE_WEIGHT * (state[0]) ** 2\n # Calculate the velocity penalty\n velocity_penalty = VELOCITY_WEIGHT * (state[1]) ** 2\n # Calculate the motor angle penalty\n # motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (self.motorAngle/self.motor_angle_range[1]) ** 2\n # Calculate the action penalty\n action_penalty = ACTION_WEIGHT * (self.action/100) ** 2\n\n # Calculate the reward\n reward = - (angle_penalty + velocity_penalty)\n\n # NEW REWARD FUNCTION\n # reward range [-1, 0]\n # angle_target = 0.0\n # angular_velocity_target = 0.0\n # motor_angle_target = 0.0\n\n # reward = -1/2 * (np.abs(state[0] - angle_target)/np.pi + np.abs(self.motorAngle - motor_angle_target)/self.motor_angle_range[1])\n # reward = - 1/2 * (np.abs(state[0] - angle_target) + np.abs(state[1] - angular_velocity_target))\n # if the episode is done with enough iterations\n # if self.iterCount > int(self.maxIter/2) and self.done:\n # # if the average of the bar angles is less than 90 degrees\n # if np.abs(np.mean(self.episode_angles)) < np.deg2rad(90):\n # reward += 100.0\n\n # if the episode is done with not enough iterations\n # if self.iterCount < int(self.maxIter/10) and self.terminated:\n # # if the motor angle is out of range\n # if self.out_of_range:\n # reward -= 2000.0\n \n return reward\n \n def reset_policy(self, reset_count=200):\n \"\"\"\n Policy to reset the environment\n\n Args:\n reset_count (int, optional): Number of iterations to wait before resetting the system. 
Defaults to 200.\n \n Returns:\n None\n \"\"\"\n\n if self.iterCount >= reset_count:\n self.terminated = True\n\n def send_fake_serial(self, command):\n \"\"\"\n Send a command to the pendulum, simulating a fake serial connection\n\n Args:\n command (list): [motor speed percentage, episode done flag]\n\n Returns:\n None\n \"\"\"\n\n motor_speed_percentage = command[0]\n episode_done = command[1]\n\n if episode_done:\n self.terminated = True\n self.reset_robot(mode=\"random\")\n else:\n self.terminated = False\n # Calculate the motor speed in steps per second\n motor_speed = motor_speed_percentage * self.max_motor_speed / 100.0\n # set the motor velocity\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.motor_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=motor_speed,\n )\n\n # time.sleep(0.1)\n \n def get_state(self):\n \"\"\"\n Read the state from the pendulum, simulating a fake serial connection\n\n Args:\n None\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n motor_angle (float): Motor angle in degrees\n done (bool): Episode done flag\n \"\"\"\n\n # Get the bar angle\n bar_angle = p.getJointState(self.robotId, self.bar_joint_idx)[0] + self.bar_compensation_angle\n # Get bar angular velocity\n bar_angular_velocity = p.getJointState(self.robotId, self.bar_joint_idx)[1]\n # Get the motor angle\n motor_angle = np.rad2deg(p.getJointState(self.robotId, self.motor_joint_idx)[0] + self.motor_compensation_angle)\n\n # Map the motor angle to the correct range\n if motor_angle > self.motor_angle_range[1] or motor_angle < self.motor_angle_range[0]:\n self.out_of_range = True\n else:\n self.out_of_range = False\n \n # Adjusting the bar angle to map correctly\n bar_angle = bar_angle % (2 * np.pi) # Normalize the angle to be within 0 to 2π\n if bar_angle > np.pi:\n bar_angle -= 2 * np.pi # Adjust angles greater than π to be between -π to π\n \n if bar_angle > 0:\n bar_angle = np.pi - bar_angle\n elif bar_angle < 0:\n bar_angle = -np.pi - bar_angle\n\n # round the states to 4 decimal places\n bar_angle = round(bar_angle/np.pi, 4)\n bar_angular_velocity = round(bar_angular_velocity/self.omega_max, 4)\n motor_angle = round(motor_angle, 4)\n\n return np.array([bar_angle, bar_angular_velocity]), motor_angle, self.out_of_range\n \n def reset_robot(self, mode=\"random\"):\n \"\"\"\n Reset the robot state\n\n Args:\n mode (str, optional): Mode to reset the robot. 
Defaults to \"random\".\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n \"\"\"\n\n if mode == \"random\":\n # Reset the robot to a random position\n bar_angle = np.random.uniform(-np.pi, np.pi)\n bar_angular_velocity = np.random.uniform(-self.omega_max, self.omega_max)\n motor_angle = np.deg2rad(np.random.uniform(self.motor_angle_range[0], self.motor_angle_range[1]))\n\n # Set the robot to the random position\n p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=bar_angle)\n # p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=motor_angle)\n p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)\n # set bar velocity with no force\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.bar_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=bar_angular_velocity,\n force=0\n )\n elif mode == \"home\":\n # Reset the robot to the home position\n p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=-self.bar_compensation_angle)\n p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)\n\n # set bar velocity with no force\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.bar_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=0\n )\n \n return self.get_state()[0]\n \n def render(self, fps=240.0):\n \"\"\"\n Render the pendulum in PyBullet\n\n Args:\n fps (float, optional): Number of frames per second. Defaults to 240.0.\n\n Returns:\n None\n \"\"\"\n p.stepSimulation()\n if self.render_mode == \"human\":\n time.sleep(1./fps)\n \n def close(self):\n \"\"\"\n Close the PyBullet connection\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n p.disconnect()"
},
{
"identifier": "FakeEnv",
"path": "control/reinforcement_learning/Environments/FakeEnv.py",
"snippet": "class FakeEnv:\n \"\"\"\n Fake environment for testing purposes\n \"\"\"\n def __init__(self, nbJoint=1):\n \"\"\"\n Initialize the fake environment.\n\n Args:\n nbJoint (int): The number of joints to simulate.\n\n Returns:\n None\n \"\"\"\n\n self.name = \"FakeEnv\"\n self.nbJoint = nbJoint\n if nbJoint == 1:\n self.num_state = 2\n self.name = \"1-fakeenv\"\n else:\n self.num_state = 2*nbJoint\n self.name = str(nbJoint)+\"-fakeenv\"\n \n self.x = np.zeros(self.num_state)\n self.vmax = 8.0\n self.iterCount = 0\n self.maxIter = 1000\n self.range_actions = np.array([-100.0, 100.0])\n self.done = False\n\n def reset(self):\n \"\"\"\n Reset the environment to the initial state (random)\n\n Args:\n None\n\n Returns:\n state (np.array): list of joint angles and velocities\n \"\"\"\n\n for i in range(self.num_state):\n if i%2==0:\n self.x[i] = np.random.uniform(-np.pi, np.pi)\n else:\n self.x[i] = np.random.uniform(-self.vmax, self.vmax)\n self.iterCount = 0\n self.done = False\n return self.x\n \n def step(self, action):\n \"\"\"\n Take a step in the environment (random)\n\n Args:\n action (float): The action to take (it is not used)\n\n Returns:\n state (np.array): list of joint angles and velocities\n reward (float): The reward for the action taken (it is not used)\n done (bool): Whether the episode is done or not\n \"\"\"\n\n for i in range(self.num_state):\n if i%2==0:\n self.x[i] = np.random.uniform(-np.pi, np.pi)\n else:\n self.x[i] = np.random.uniform(-self.vmax, self.vmax)\n self.iterCount += 1\n if self.iterCount >= self.maxIter:\n self.done = True\n return self.x, np.random.uniform(-1, 1), self.done\n \n def render(self, debug=False):\n \"\"\"\n Print the current state\n\n Args:\n debug (bool): Whether to print the state or not\n\n Returns:\n None\n \"\"\"\n \n if debug:\n print(self.x)"
},
{
"identifier": "Agent",
"path": "control/reinforcement_learning/DQN/Agent.py",
"snippet": "class Agent:\n \"\"\"\n DQN Agent\n - Take an environment\n - Set up the deep neural network\n - Store the experience\n - Choose action\n - Train the network\n - Evaluate the network\n \"\"\"\n def __init__(self, env):\n\n # check if gpu is available\n if tf.config.list_physical_devices('GPU'):\n # print the device name\n print(\"GPU is available\")\n print(\"Device name: {}\".format(tf.test.gpu_device_name()))\n \n else:\n print(\"GPU is not available\")\n\n self.env = env\n \n self.nJoint = self.env.nbJoint\n \n # read INI file\n # get the path of the root directory\n root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n ini_file_path = os.path.join(root_dir, 'config.ini')\n self.params = self.parse_ini(ini_file_path)\n \n # set up the parameters from the INI file\n self.action_steps = int(self.params['action_steps'])\n self.torque_range = ast.literal_eval(self.params['control_range'])\n self.max_episode_steps = int(self.params['max_episode_steps'])\n self.train_episodes = int(self.params['train_episodes'])\n self.lr = float(self.params['lr'])\n self.discount_factor = float(self.params['discount_factor'])\n self.epsilon = float(self.params['epsilon'])\n self.epsilon_decay_episodes = int(self.params['epsilon_decay_episodes'])\n self.epsilon_final = float(self.params['epsilon_final'])\n self.buffer_size = int(self.params['buffer_size'])\n self.batch_size = int(self.params['batch_size'])\n self.hidden_dims = ast.literal_eval(self.params['hidden_dims'])\n self.update_rate_episodes = int(self.params['target_update_episodes'])\n self.train_rate_steps = int(self.params['train_rate_steps'])\n\n self.discounted_reward = 0.0\n self.epsilon_decay = (self.epsilon - self.epsilon_final) / self.epsilon_decay_episodes\n\n # set up the environment parameters\n self.env.num_actions = self.action_steps\n self.env.range_actions = self.torque_range\n self.env.maxIter = self.max_episode_steps\n self.env.umax = self.torque_range[1]\n self.env.actions = np.linspace(self.env.range_actions[0], self.env.range_actions[1], self.action_steps)\n self.env.action_space = [i for i in range(self.action_steps)]\n self.action_space = self.env.action_space\n\n self.total_step_counter = 0\n self.replay_buffer = ReplayBuffer(self.buffer_size)\n\n self.name_model = self.env.name + '_'+str(self.action_steps)+'_'+str(self.max_episode_steps)+'_' + \\\n str(self.epsilon_decay_episodes)+'_'+str(self.buffer_size)+'_'+str(self.batch_size)+'_'+ \\\n str(self.hidden_dims)+'_'+str(self.update_rate_episodes)+'_'+str(self.train_rate_steps)\n \n # path of the weights folder\n self.weights_folder = os.path.join(root_dir, 'saved_weights')\n self.final_weights_folder = os.path.join(root_dir, 'final_results/'+self.env.name)\n self.weights_name = ['dqn_weights_' + self.name_model +'.h5',\n 'dqn_target_weights_' + self.name_model +'.h5']\n \n # save metrics in a csv file (not used)\n self.metrics_folder = os.path.join(root_dir, 'saved_metrics')\n self.metrics_df = pd.DataFrame()\n self.metrics_name = ''\n\n # save the logs in a tensorboard file\n self.log_dir = os.path.join(root_dir, 'logs')\n self.writer = tf.summary.create_file_writer(os.path.join(self.log_dir, self.name_model))\n\n # create the deep neural network\n self.q_net = DeepQNetwork(self.lr, self.env.num_actions, self.env.num_state, self.hidden_dims , opt='adam', loss='mse')\n self.q_target_net = DeepQNetwork(self.lr, self.env.num_actions, self.env.num_state, self.hidden_dims, opt='adam', loss='mse')\n\n self.loss = 0.0\n self.current_episode = 0\n 
self.training_time = 0\n \n def policy(self, observation, type='epsilon_greedy'):\n \"\"\"\n Choose an action based on the policy\n \"\"\"\n if type == 'epsilon_greedy':\n if np.random.random() < self.epsilon:\n action = np.random.choice(self.action_space)\n else:\n action = np.argmax(self.q_net.predict(np.array([observation])))\n elif type == 'greedy':\n action = np.argmax(self.q_net.predict(np.array([observation])))\n elif type == 'random':\n action = np.random.choice(self.action_space)\n else:\n raise Exception(\"Unknown policy type\")\n \n return action\n \n def train(self):\n \"\"\"\n Train the network\n \"\"\"\n # check if the replay buffer has enough experiences\n if len(self.replay_buffer.gameplay_experiences) < self.batch_size:\n return\n \n # sample a batch of experiences\n states, actions, rewards, new_states, dones = self.replay_buffer.sample_batch(self.batch_size)\n \n # predict the q values of current states and next states\n q_predicted = self.q_net.predict(states)\n q_next = self.q_target_net.predict(new_states)\n # get the maximum q value of the next states\n q_max_next = np.max(q_next, axis=1)\n # copy the q values of the current states\n q_target = q_predicted.copy()\n \n for i in range(self.batch_size):\n # Q(s, a) = r + γ * max(Q(s', a')) * (1 - done)\n # if the next state is terminal, then the q value is just the reward\n # otherwise, estimate the q value using the target network\n q_target[i, actions[i]] = rewards[i] + self.discount_factor * q_max_next[i] * (1 - dones[i])\n \n # train the network in batches\n self.loss = self.q_net.train_on_batch(states, q_target)\n # self.loss = self.q_net.train_batch_gradientTape(states, q_target)\n \n def train_model(self, render=True, plot=True, verbose=False, soft_start=False):\n \"\"\"\n Train the model for a number of episodes and plot the reward\n \"\"\"\n\n # start from existing weights\n if soft_start:\n # load the weights\n self.q_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n\n start_training_time = time.time()\n\n # train the network for a number of episodes\n for episode in range(self.train_episodes):\n observation, _ = self.env.reset()\n done = False\n self.discounted_reward = 0.0\n episode_steps = 0\n self.loss = 0.0\n self.current_episode = episode\n\n # while the episode is not done\n while not done:\n if render:\n self.env.render()\n\n # copy of the observation to store in the replay buffer\n # because when passing the env reference, the old observation gets overwritten\n observation_copy = copy.copy(observation)\n action = self.policy(observation, 'epsilon_greedy')\n new_observation, reward, done, _, _ = self.env.step(self.env.actions[action])\n new_observation_copy = copy.copy(new_observation)\n self.discounted_reward += self.discount_factor**episode_steps * reward\n \n # store the experience in the replay buffer\n self.replay_buffer.store_tuple(observation_copy, action, reward, new_observation_copy, done)\n observation = new_observation_copy\n\n # train the network every train_rate_steps\n if self.total_step_counter % self.train_rate_steps == 0 or done:\n self.train()\n \n self.total_step_counter += 1\n episode_steps += 1\n \n # update the epsilon\n self.epsilon = self.epsilon - self.epsilon_decay if self.epsilon > self.epsilon_final else self.epsilon_final\n # update the target network\n if (episode+1) % self.update_rate_episodes == 0:\n 
self.q_target_net.model.set_weights(self.q_net.model.get_weights()) \n # save the weights every 10 episodes\n if episode % 10 == 0:\n self.q_net.model.save_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.save_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n # clear the session to avoid memory leaks\n K.clear_session() \n \n # save the metrics to tensorboard\n with self.writer.as_default():\n tf.summary.scalar('loss', self.loss, step=episode)\n tf.summary.scalar('epsilon', self.epsilon, step=episode)\n tf.summary.scalar('reward', self.discounted_reward, step=episode)\n tf.summary.scalar('episode_steps', episode_steps, step=episode)\n self.writer.flush()\n \n self.training_time = time.time() - start_training_time\n print(\"Training time: {}\".format(self.training_time))\n \n def evaluate_model(self, episodes, swingUp=False, render=True, plot=True, verbose=False, final=False):\n \"\"\"\n Evaluate the model for a number of episodes\n \"\"\"\n\n # load the weights from the final results folder\n if final:\n self.q_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[1]))\n else:\n self.q_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n\n theta_list = []\n theta_dot_list = []\n torque_list = []\n\n for episode in range(episodes):\n\n # set the environment to the initial state (theta=0, theta_dot=0)\n if swingUp:\n # observation = self.env.reset_swingUp()\n observation = self.env.reset_robot(mode=\"home\")\n else:\n observation = self.env.reset()\n done = False\n episode_reward = 0\n\n # evaluate the model for a number of steps\n while not done:\n if render:\n self.env.render()\n \n action = self.policy(observation, 'greedy')\n new_observation, reward, done = self.env.step(self.env.actions[action])\n new_observation_copy = copy.copy(new_observation)\n episode_reward += reward\n \n observation = new_observation_copy\n \n # append the angle, angular velocity and torque to the lists\n if self.nJoint == 1:\n theta_list.append(observation[0])\n theta_dot_list.append(observation[1])\n torque_list.append(self.env.actions[action])\n else:\n theta_list.append([observation[0], observation[2]])\n theta_dot_list.append([observation[1], observation[3]])\n torque_list.append([self.env.actions[action], 0.0])\n\n if verbose:\n print(\"Episode: {}, Step: {}, Reward: {}\".format(episode, self.env.iterCount, episode_reward))\n \n if plot:\n # plot the angle, angular velocity and torque using sns\n sns.set()\n # plot the angle\n # if the pendulum is single\n if self.nJoint == 1:\n # plot the angles\n plt.plot(theta_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angle\")\n plt.legend([\"q\"])\n plt.title(\"Swing Up Angle\")\n plt.show()\n # plot the angular velocities\n plt.plot(theta_dot_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angular Velocity\")\n plt.legend([\"dq\"])\n plt.title(\"Swing Up Angular Velocity\")\n plt.show()\n # plot the torques\n plt.plot(torque_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Torque\")\n plt.legend([\"tau\"])\n plt.title(\"Swing Up Torque\")\n plt.show()\n # if the pendulum is double\n else:\n # plot the angles\n plt.plot(theta_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angles\")\n plt.legend([\"q1\", \"q2\"])\n plt.title(\"Swing Up Angles\")\n plt.show()\n # plot 
the angular velocities\n plt.plot(theta_dot_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angular Velocities\")\n plt.legend([\"dq1\", \"dq2\"])\n plt.title(\"Swing Up Angular Velocities\")\n plt.show()\n # plot the torques\n plt.plot(torque_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Torques\")\n plt.legend([\"tau1\", \"tau2\"])\n plt.title(\"Swing Up Torques\")\n plt.show()\n\n def plot_value_policy(self, visual='2D', resolution=10, final=False):\n \"\"\"\n Plot the value function and the policy of single pendulum\n \"\"\"\n # Load the weights from the final results folder\n if final:\n self.q_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[1]))\n else:\n self.q_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n\n # Discretize the state space\n theta = np.linspace(-np.pi, np.pi, resolution)\n theta_dot = np.linspace(-self.env.vmax, self.env.vmax, resolution)\n\n # Create meshgrid\n theta_mesh, theta_dot_mesh = np.meshgrid(theta, theta_dot)\n\n # Initialize value function and policy arrays\n V = np.zeros_like(theta_mesh)\n P = np.zeros_like(theta_mesh)\n\n # Iterate over each state in the meshgrid\n for i in range(resolution):\n for j in range(resolution):\n state = np.array([theta_mesh[i, j], theta_dot_mesh[i, j]])\n state_tensor = tf.constant(state, dtype=tf.float32)\n q_values = self.q_net.model(state_tensor[None])[0]\n V[i, j] = tf.reduce_max(q_values)\n P[i, j] = tf.argmax(q_values)\n # map the action index to the action value\n P[i, j] = self.env.actions[int(P[i, j])]\n \n if visual=='3D':\n # Set the viewing angles\n elevation = 90 # Viewing angle from above\n azimuth = -90 # Rotate around the z-axis\n\n # Create 3D plots\n fig = plt.figure(figsize=(10, 5))\n ax1 = fig.add_subplot(121, projection='3d')\n value_surf = ax1.plot_surface(theta_mesh, theta_dot_mesh, V, cmap=cm.viridis)\n ax1.view_init(elevation, azimuth) # Set the viewing angles\n ax1.set_xlabel('q')\n ax1.set_ylabel('dq')\n ax1.set_zlabel('Value')\n ax1.set_title('Value Function')\n fig.colorbar(value_surf, shrink=0.5, aspect=5)\n\n ax2 = fig.add_subplot(122, projection='3d')\n policy_surf = ax2.plot_surface(theta_mesh, theta_dot_mesh, P, cmap=cm.Spectral)\n ax2.view_init(elevation, azimuth) # Set the viewing angles\n ax2.set_xlabel('q')\n ax2.set_ylabel('dq')\n ax2.set_zlabel('Action')\n ax2.set_title('Policy Function')\n fig.colorbar(policy_surf, shrink=0.5, aspect=5)\n else:\n # Set Seaborn style\n sns.set()\n\n # Create 2D plots with colormaps using Seaborn\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n\n # Plot the value function\n ax1 = axes[0]\n sns.heatmap(V, cmap='viridis', ax=ax1, cbar=True)\n # set ticks as theta and theta_dot\n ax1.set_xticks(np.linspace(0, resolution, 5))\n ax1.set_xticklabels([-3, -1, 0, 1, 3])\n ax1.set_yticks(np.linspace(0, resolution, 5))\n ax1.set_yticklabels(np.linspace(-self.env.vmax, self.env.vmax, 5, dtype=int))\n ax1.set_xlabel('q')\n ax1.set_ylabel('dq')\n ax1.set_title('Value Function')\n\n # Plot the policy\n ax2 = axes[1]\n sns.heatmap(P, cmap='Spectral', ax=ax2, cbar=True)\n # set ticks as theta and theta_dot\n ax2.set_xticks(np.linspace(0, resolution, 5))\n ax2.set_xticklabels([-3, -1, 0, 1, 3])\n ax2.set_yticks(np.linspace(0, resolution, 5))\n ax2.set_yticklabels(np.linspace(-self.env.vmax, self.env.vmax, 5, 
dtype=int))\n ax2.set_xlabel('q')\n ax2.set_ylabel('dq')\n ax2.set_title('Policy Function')\n plt.tight_layout()\n\n plt.show()\n\n def parse_ini(self, ini_file):\n \"\"\"\n Parse the ini file with the env parameters\n \"\"\"\n config = configparser.ConfigParser()\n config.read(ini_file)\n\n if self.env.name == 'RealPendulum' or self.env.name == 'PyBulletPendulum' or self.env.name == '1-fakeenv':\n # parse the 'rotary_pendulum' section\n return config['rotary_pendulum']\n else:\n # raise an exception if the environment is not supported\n return Exception(\"Environment not supported\")\n \n def save_metrics(self, episode, episode_reward, last_loss, last_epsilon, episode_time):\n \"\"\"\n Save the metrics in a dataframe and export it to a csv file\n \"\"\"\n # if the dataframe is empty, create it\n if self.metrics_df.empty:\n self.metrics_df = pd.DataFrame(columns=['episode', 'reward', 'loss', 'epsilon', 'time'])\n timestamp_ep = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n self.metrics_name = self.name_model + '_' + timestamp_ep + '.csv'\n \n # append the metrics to the dataframe using iloc\n self.metrics_df.loc[len(self.metrics_df)] = [episode, episode_reward, last_loss, last_epsilon, episode_time]\n # export the dataframe to a csv file with timestamp\n self.metrics_df.to_csv(os.path.join(self.metrics_folder, self.metrics_name), index=False)"
}
] | from ..Environments import RealPendulumEnv as real
from ..Environments import PyBulletPendulumEnv as pb
from ..Environments import FakeEnv as fake
from ..DQN.Agent import Agent | 10,453 |
isFake = False
isPyBullet = True
isReal = False
train = True
plot_colormaps = False
# select the environment
if isFake:
    env = fake.FakeEnv(1)
elif isPyBullet:
    env = pb.PyBulletPendulumEnv(render_mode='human')
elif isReal:
|
isFake = False
isPyBullet = True
isReal = False
train = True
plot_colormaps = False
# select the environment
if isFake:
    env = fake.FakeEnv(1)
elif isPyBullet:
    env = pb.PyBulletPendulumEnv(render_mode='human')
elif isReal: | env = real.RealPendulumEnv("COM3", 115200) | 0 | 2023-12-09 11:22:54+00:00 | 12k |
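For reference, appending this row's `next_line` to its `cropped_code` (together with the row's `import_statement`) yields the completed environment-selection block. Everything below is taken from the row's own fields, with only indentation restored:

```python
# Reconstructed from this row: import_statement + cropped_code + next_line
from ..Environments import RealPendulumEnv as real
from ..Environments import PyBulletPendulumEnv as pb
from ..Environments import FakeEnv as fake
from ..DQN.Agent import Agent

isFake = False
isPyBullet = True
isReal = False
train = True
plot_colormaps = False

# select the environment
if isFake:
    env = fake.FakeEnv(1)
elif isPyBullet:
    env = pb.PyBulletPendulumEnv(render_mode='human')
elif isReal:
    env = real.RealPendulumEnv("COM3", 115200)  # <- the row's next_line (completion target)
```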
tommy-xq/SA2VP | vpt_main/src/models/build_model.py | [
{
"identifier": "ResNet",
"path": "vpt_main/src/models/resnet.py",
"snippet": "class ResNet(nn.Module):\n \"\"\"ResNet model.\"\"\"\n\n def __init__(self, cfg):\n super(ResNet, self).__init__()\n self.cfg = cfg\n\n model_type = cfg.DATA.FEATURE\n model = self.get_pretrained_model(model_type)\n\n if \"prompt\" in cfg.MODEL.TRANSFER_TYPE:\n # setup prompt_embd and modify model accordingly\n model = self.setup_prompt(cfg.MODEL.PROMPT, model)\n else:\n self.prompt_embeddings = None\n\n # setup side network if needed\n self.setup_side()\n # set which parameters require grad\n # creat self.prompt_layers, self.frozen_layers, self.tuned_layers\n self.setup_grad(model)\n # create self.head\n self.setup_head(cfg)\n\n def setup_side(self):\n if self.cfg.MODEL.TRANSFER_TYPE != \"side\":\n self.side = None\n else:\n self.side_alpha = nn.Parameter(torch.tensor(0.0))\n out_dim = self.get_outputdim()\n m = models.alexnet(pretrained=True)\n self.side = nn.Sequential(OrderedDict([\n (\"features\", m.features),\n (\"avgpool\", m.avgpool),\n ]))\n self.side_projection = nn.Linear(9216, out_dim, bias=False)\n\n def setup_grad(self, model):\n transfer_type = self.cfg.MODEL.TRANSFER_TYPE\n # split enc into 3 parts:\n # prompt_layers frozen_layers tuned_layers\n # partial-1 identity -layer3 layer4\n # partial-2: identity -layer2 \"layer4\" \"layer3\"\n # partial-3: identity -layer1 \"layer4\" \"layer3\" \"layer2\"\n # linear identity all identity\n # end2end identity identity all\n\n # prompt-below conv1 all but conv1\n # prompt-pad identity all\n\n if transfer_type == \"prompt\" and self.cfg.MODEL.PROMPT.LOCATION == \"below\": # noqa\n self.prompt_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n ]))\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n self.tuned_layers = nn.Identity()\n else:\n # partial, linear, end2end, prompt-pad\n self.prompt_layers = nn.Identity()\n\n if transfer_type == \"partial-0\":\n # last conv block\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4[:-1]),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer4\", model.layer4[-1]),\n (\"avgpool\", model.avgpool),\n ]))\n elif transfer_type == \"partial-1\":\n # tune last layer\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"partial-2\":\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"partial-3\":\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", 
model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"linear\" or transfer_type == \"side\" or transfer_type == \"tinytl-bias\":\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n self.tuned_layers = nn.Identity()\n\n elif transfer_type == \"end2end\":\n self.frozen_layers = nn.Identity()\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"prompt\" and self.cfg.MODEL.PROMPT.LOCATION == \"pad\": # noqa\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n self.tuned_layers = nn.Identity()\n\n if transfer_type == \"tinytl-bias\":\n for k, p in self.frozen_layers.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n else:\n for k, p in self.frozen_layers.named_parameters():\n p.requires_grad = False\n self.transfer_type = transfer_type\n\n def setup_prompt(self, prompt_config, model):\n # ONLY support below and pad\n self.prompt_location = prompt_config.LOCATION\n self.num_tokens = prompt_config.NUM_TOKENS\n if prompt_config.LOCATION == \"below\":\n return self._setup_prompt_below(prompt_config, model)\n elif prompt_config.LOCATION == \"pad\":\n return self._setup_prompt_pad(prompt_config, model)\n else:\n raise ValueError(\n \"ResNet models cannot use prompt location {}\".format(\n prompt_config.LOCATION))\n\n def _setup_prompt_below(self, prompt_config, model):\n if prompt_config.INITIATION == \"random\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n nn.init.uniform_(self.prompt_embeddings.data, 0.0, 1.0)\n self.prompt_norm = tv.transforms.Normalize(\n mean=[sum([0.485, 0.456, 0.406])/3] * self.num_tokens,\n std=[sum([0.229, 0.224, 0.225])/3] * self.num_tokens,\n )\n\n elif prompt_config.INITIATION == \"gaussian\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n\n nn.init.normal_(self.prompt_embeddings.data)\n\n self.prompt_norm = nn.Identity()\n\n else:\n raise ValueError(\"Other initiation scheme is not supported\")\n\n # modify first conv layer\n old_weight = model.conv1.weight # [64, 3, 7, 7]\n model.conv1 = nn.Conv2d(\n self.num_tokens+3, 64, kernel_size=7,\n stride=2, padding=3, bias=False\n )\n torch.nn.init.xavier_uniform(model.conv1.weight)\n\n model.conv1.weight[:, :3, :, :].data.copy_(old_weight)\n return model\n\n def _setup_prompt_pad(self, prompt_config, model):\n if prompt_config.INITIATION == \"random\":\n self.prompt_embeddings_tb = 
nn.Parameter(torch.zeros(\n 1, 3, 2 * self.num_tokens,\n self.cfg.DATA.CROPSIZE + 2 * self.num_tokens\n ))\n self.prompt_embeddings_lr = nn.Parameter(torch.zeros(\n 1, 3, self.cfg.DATA.CROPSIZE, 2 * self.num_tokens\n ))\n\n nn.init.uniform_(self.prompt_embeddings_tb.data, 0.0, 1.0)\n nn.init.uniform_(self.prompt_embeddings_lr.data, 0.0, 1.0)\n\n self.prompt_norm = tv.transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )\n\n elif prompt_config.INITIATION == \"gaussian\":\n self.prompt_embeddings_tb = nn.Parameter(torch.zeros(\n 1, 3, 2 * self.num_tokens,\n self.cfg.DATA.CROPSIZE + 2 * self.num_tokens\n ))\n self.prompt_embeddings_lr = nn.Parameter(torch.zeros(\n 1, 3, self.cfg.DATA.CROPSIZE, 2 * self.num_tokens\n ))\n\n nn.init.normal_(self.prompt_embeddings_tb.data)\n nn.init.normal_(self.prompt_embeddings_lr.data)\n\n self.prompt_norm = nn.Identity()\n else:\n raise ValueError(\"Other initiation scheme is not supported\")\n return model\n\n def get_pretrained_model(self, model_type):\n model_root = self.cfg.MODEL.MODEL_ROOT\n\n if model_type == \"imagenet_sup_rn50\":\n model = models.resnet50(pretrained=True)\n elif model_type == \"imagenet_sup_rn101\":\n model = models.resnet101(pretrained=True) # 2048\n elif model_type == \"imagenet_sup_rn152\":\n model = models.resnet152(pretrained=True) # 2048\n elif model_type == \"imagenet_sup_rn34\":\n model = models.resnet34(pretrained=True) # 512\n elif model_type == \"imagenet_sup_rn18\":\n model = models.resnet18(pretrained=True) # 512\n\n elif model_type == \"inat2021_sup_rn50\":\n checkpoint = torch.load(\n f\"{model_root}/inat2021_supervised_large.pth.tar\",\n map_location=torch.device('cpu')\n )\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n model.load_state_dict(checkpoint['state_dict'], strict=True)\n elif model_type == 'inat2021_mini_sup_rn50':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(\n f\"{model_root}/inat2021_supervised_mini.pth.tar\",\n map_location=torch.device('cpu')\n )\n model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_moco_v2_rn50':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(\n f\"{model_root}/inat2021_moco_v2_mini_1000_ep.pth.tar\",\n map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_moco_v2_rn50':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(\n f\"{model_root}/imagenet_moco_v2_800ep_pretrain.pth.tar\",\n map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n model.load_state_dict(state_dict, strict=True)\n\n elif 
model_type.startswith(\"mocov3_rn50\"):\n moco_epoch = model_type.split(\"ep\")[-1]\n checkpoint = torch.load(\n f\"{model_root}/mocov3_linear-1000ep.pth.tar\",\n map_location=\"cpu\")\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only base_encoder up to before the embedding layer\n if k.startswith('module.'):\n # remove prefix\n state_dict[k[len(\"module.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n model = models.resnet50()\n model.load_state_dict(state_dict, strict=False)\n\n else:\n raise ValueError(\"model type not supported for resnet backbone\")\n\n model.fc = nn.Identity()\n return model\n\n def get_outputdim(self):\n if self.cfg.DATA.FEATURE == \"imagenet_sup_rn34\" or self.cfg.DATA.FEATURE == \"imagenet_sup_rn18\":\n out_dim = 512\n else:\n out_dim = 2048\n return out_dim\n\n def setup_head(self, cfg):\n out_dim = self.get_outputdim()\n self.head = MLP(\n input_dim=out_dim,\n mlp_dims=[out_dim] * self.cfg.MODEL.MLP_NUM + \\\n [cfg.DATA.NUMBER_CLASSES],\n special_bias=True\n )\n\n def incorporate_prompt(self, x):\n B = x.shape[0]\n if self.prompt_location == \"below\":\n x = torch.cat((\n x,\n self.prompt_norm(\n self.prompt_embeddings).expand(B, -1, -1, -1),\n ), dim=1)\n # (B, 3 + num_prompts, crop_size, crop_size)\n\n elif self.prompt_location == \"pad\":\n prompt_emb_lr = self.prompt_norm(\n self.prompt_embeddings_lr).expand(B, -1, -1, -1)\n prompt_emb_tb = self.prompt_norm(\n self.prompt_embeddings_tb).expand(B, -1, -1, -1)\n\n x = torch.cat((\n prompt_emb_lr[:, :, :, :self.num_tokens],\n x, prompt_emb_lr[:, :, :, self.num_tokens:]\n ), dim=-1)\n x = torch.cat((\n prompt_emb_tb[:, :, :self.num_tokens, :],\n x, prompt_emb_tb[:, :, self.num_tokens:, :]\n ), dim=-2)\n # (B, 3, crop_size + num_prompts, crop_size + num_prompts)\n else:\n raise ValueError(\"not supported yet\")\n x = self.prompt_layers(x)\n return x\n\n def forward(self, x, return_feature=False):\n if self.side is not None:\n side_output = self.side(x)\n side_output = side_output.view(side_output.size(0), -1)\n side_output = self.side_projection(side_output)\n\n x = self.get_features(x)\n\n if self.side is not None:\n alpha_squashed = torch.sigmoid(self.side_alpha)\n x = alpha_squashed * x + (1 - alpha_squashed) * side_output\n\n if return_feature:\n return x\n\n return self.head(x)\n\n def get_features(self, x):\n \"\"\"get a (batch_size, 2048) feature\"\"\"\n if self.frozen_layers.training:\n self.frozen_layers.eval()\n\n if \"prompt\" not in self.transfer_type:\n with torch.set_grad_enabled(self.frozen_layers.training):\n x = self.frozen_layers(x)\n else:\n # prompt tuning required frozen_layers saving grad\n x = self.incorporate_prompt(x)\n x = self.frozen_layers(x)\n\n x = self.tuned_layers(x) # batch_size x 2048 x 1\n x = x.view(x.size(0), -1)\n\n return x"
},
{
"identifier": "ConvNeXt",
"path": "vpt_main/src/models/convnext.py",
"snippet": "class ConvNeXt(ResNet):\n \"\"\"\n ConvNeXt model,\n utilizing the ResNet class for structure and prompt setup\n \"\"\"\n\n def __init__(self, cfg):\n if cfg.DATA.FEATURE not in [\n \"imagenet_sup_rnx_tiny\",\n \"imagenet_sup_rnx_small\",\n \"imagenet_sup_rnx_base\",\n \"imagenet22k_sup_rnx_base\",\n \"imagenet22k_sup_rnx_large\",\n \"imagenet22k_sup_rnx_xlarge\",\n ]:\n raise ValueError(\"feature does not support ConvNeXt models\")\n if cfg.MODEL.PROMPT.LOCATION == \"below\":\n raise ValueError(\"Not support prompt-below at the moment\")\n super(ConvNeXt, self).__init__(cfg)\n\n def get_outputdim(self):\n backbone_arch = self.cfg.DATA.FEATURE.split(\"_\")[-1]\n return FEAT2DIM[backbone_arch]\n\n def setup_grad(self, model):\n # TODO: change the name of layers\n \"\"\"\n downsample_layers[0], stages[0]\n downsample_layers[1], stages[1]\n downsample_layers[2], stages[2]\n downsample_layers[3], stages[3]\n norm\n \"\"\"\n self.norm = model.norm\n transfer_type = self.cfg.MODEL.TRANSFER_TYPE\n # split enc into 3 parts:\n # prompt_layers frozen_layers tuned_layers\n # partial-0 identity all but last block\n # stages[-1][-1], stages[-1][-1] + norm\n # linear identity all identity\n # end2end identity identity all\n # prompt-pad identity all\n\n # partial, linear, end2end, prompt-pad\n self.prompt_layers = nn.Identity()\n\n if transfer_type == \"partial-0\":\n # last block to tune\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3][:-1]),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"stage4\", model.stages[3][-1]),\n ]))\n self.tune_norm = True\n\n elif transfer_type == \"linear\" or transfer_type == \"side\" or transfer_type == \"tinytl-bias\": # noqa\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3]),\n ]))\n self.tuned_layers = nn.Identity()\n self.tune_norm = False\n\n elif transfer_type == \"end2end\":\n self.frozen_layers = nn.Identity()\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3]),\n ]))\n self.tune_norm = True\n\n elif transfer_type == \"prompt\" and self.cfg.MODEL.PROMPT.LOCATION == \"pad\": # noqa\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3]),\n ]))\n self.tuned_layers = 
nn.Identity()\n self.tune_norm = False\n\n if transfer_type == \"tinytl-bias\":\n for k, p in self.frozen_layers.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n else:\n for k, p in self.frozen_layers.named_parameters():\n p.requires_grad = False\n\n if not self.tune_norm:\n for k, p in self.norm.named_parameters():\n p.requires_grad = False\n self.transfer_type = transfer_type\n\n def _setup_prompt_below(self, prompt_config, model):\n # TODO:\n # the only difference btw this function and that of the ResNet class is the name of the first layer\n if prompt_config.INITIATION == \"random\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n nn.init.uniform_(self.prompt_embeddings.data, 0.0, 1.0)\n self.prompt_norm = tv.transforms.Normalize(\n mean=[sum([0.485, 0.456, 0.406])/3] * self.num_tokens,\n std=[sum([0.229, 0.224, 0.225])/3] * self.num_tokens,\n )\n\n elif prompt_config.INITIATION == \"gaussian\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n\n nn.init.normal_(self.prompt_embeddings.data)\n self.prompt_norm = nn.Identity()\n\n else:\n raise ValueError(\"Other initiation scheme is not supported\")\n\n # modify first conv layer\n old_weight = model.downsample_layers[0][0].weight # [*, 3, 4, 4]\n model.downsample_layers[0][0] = nn.Conv2d(\n self.num_tokens+3, old_weight.shape[0], kernel_size=4, stride=4\n )\n trunc_normal_(model.downsample_layers[0][0].weight, std=.02)\n torch.nn.init.constant_(model.downsample_layers[0][0].bias, 0)\n\n model.downsample_layers[0][0].weight[:, :3, :, :].data.copy_(old_weight)\n return model\n\n def get_pretrained_model(self, model_type):\n backbone_arch = model_type.split(\"_\")[-1]\n is_22k = \"22k\" in model_type\n if is_22k:\n # need to specify num_classes, o.w. throw error of weight size mismatch\n num_classes = 21841\n else:\n num_classes = 1000\n\n if backbone_arch == \"tiny\":\n model = convnext_tiny(pretrained=True)\n elif backbone_arch == \"small\":\n model = convnext_small(pretrained=True)\n elif backbone_arch == \"base\":\n model = convnext_base(\n pretrained=True, in_22k=is_22k, num_classes=num_classes)\n elif backbone_arch == \"large\":\n model = convnext_large(\n pretrained=True, in_22k=is_22k, num_classes=num_classes)\n elif backbone_arch == \"xlarge\":\n model = convnext_xlarge(\n pretrained=True, in_22k=is_22k, num_classes=num_classes)\n else:\n raise ValueError(\"model type not supported for resnet backbone\")\n\n model.head = nn.Identity()\n return model\n\n def get_features(self, x):\n \"\"\"get a (batch_size, feat_dim) feature\"\"\"\n if self.frozen_layers.training:\n self.frozen_layers.eval()\n\n if \"prompt\" not in self.transfer_type:\n with torch.set_grad_enabled(self.frozen_layers.training):\n x = self.frozen_layers(x)\n else:\n # prompt tuning required frozen_layers saving grad\n x = self.incorporate_prompt(x)\n x = self.frozen_layers(x)\n\n x = self.tuned_layers(x) # batch_size x 2048 x h x w\n x = self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)\n return x"
},
{
"identifier": "ViT",
"path": "vpt_main/src/models/vit_models.py",
"snippet": "class ViT(nn.Module):\n \"\"\"ViT-related model.\"\"\"\n\n def __init__(self, cfg, load_pretrain=True, vis=False):\n super(ViT, self).__init__()\n\n if \"prompt\" in cfg.MODEL.TRANSFER_TYPE:\n prompt_cfg = cfg.MODEL.PROMPT\n else:\n prompt_cfg = None\n\n if cfg.MODEL.TRANSFER_TYPE != \"end2end\" and \"prompt\" not in cfg.MODEL.TRANSFER_TYPE:\n # linear, cls, tiny-tl, parital, adapter\n self.froze_enc = True\n else:\n # prompt, end2end, cls+prompt\n self.froze_enc = False\n \n if cfg.MODEL.TRANSFER_TYPE == \"adapter\":\n adapter_cfg = cfg.MODEL.ADAPTER\n else:\n adapter_cfg = None\n\n self.build_backbone(\n prompt_cfg, cfg, adapter_cfg, load_pretrain, vis=vis)\n self.cfg = cfg\n self.setup_side()\n self.setup_head(cfg)\n\n def setup_side(self):\n if self.cfg.MODEL.TRANSFER_TYPE != \"side\":\n self.side = None\n else:\n self.side_alpha = nn.Parameter(torch.tensor(0.0))\n m = models.alexnet(pretrained=True)\n self.side = nn.Sequential(OrderedDict([\n (\"features\", m.features),\n (\"avgpool\", m.avgpool),\n ]))\n self.side_projection = nn.Linear(9216, self.feat_dim, bias=False)\n\n def build_backbone(self, prompt_cfg, cfg, adapter_cfg, load_pretrain, vis):\n transfer_type = cfg.MODEL.TRANSFER_TYPE\n self.enc, self.feat_dim = build_vit_sup_models(\n cfg.DATA.FEATURE, cfg.DATA.CROPSIZE, prompt_cfg, cfg.MODEL.MODEL_ROOT, adapter_cfg, load_pretrain, vis\n )\n\n # linear, prompt, cls, cls+prompt, partial_1\n if transfer_type == \"partial-1\":\n total_layer = len(self.enc.transformer.encoder.layer)\n # tuned_params = [\n # \"transformer.encoder.layer.{}\".format(i-1) for i in range(total_layer)]\n for k, p in self.enc.named_parameters():\n if \"transformer.encoder.layer.{}\".format(total_layer - 1) not in k and \"transformer.encoder.encoder_norm\" not in k: # noqa\n p.requires_grad = False\n elif transfer_type == \"partial-2\":\n total_layer = len(self.enc.transformer.encoder.layer)\n for k, p in self.enc.named_parameters():\n if \"transformer.encoder.layer.{}\".format(total_layer - 1) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 2) not in k and \"transformer.encoder.encoder_norm\" not in k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-4\":\n total_layer = len(self.enc.transformer.encoder.layer)\n for k, p in self.enc.named_parameters():\n if \"transformer.encoder.layer.{}\".format(total_layer - 1) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 2) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 3) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 4) not in k and \"transformer.encoder.encoder_norm\" not in k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"linear\" or transfer_type == \"side\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"tinytl-bias\":\n for k, p in self.enc.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\" and prompt_cfg.LOCATION == \"below\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"embeddings.patch_embeddings.weight\" not in k and \"embeddings.patch_embeddings.bias\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt+bias\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == 
\"prompt-noupdate\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"cls\":\n for k, p in self.enc.named_parameters():\n if \"cls_token\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"cls-reinit\":\n nn.init.normal_(\n self.enc.transformer.embeddings.cls_token,\n std=1e-6\n )\n\n for k, p in self.enc.named_parameters():\n if \"cls_token\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"cls+prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"cls_token\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"cls-reinit+prompt\":\n nn.init.normal_(\n self.enc.transformer.embeddings.cls_token,\n std=1e-6\n )\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"cls_token\" not in k:\n p.requires_grad = False\n \n # adapter\n elif transfer_type == \"adapter\":\n for k, p in self.enc.named_parameters():\n if \"adapter\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"end2end\":\n logger.info(\"Enable all parameters update during training\")\n\n else:\n raise ValueError(\"transfer type {} is not supported\".format(\n transfer_type))\n\n def setup_head(self, cfg):\n self.head = MLP(\n input_dim=self.feat_dim,\n mlp_dims=[self.feat_dim] * self.cfg.MODEL.MLP_NUM + \\\n [cfg.DATA.NUMBER_CLASSES], # noqa\n special_bias=True\n )\n\n def forward(self, x, return_feature=False):\n if self.side is not None:\n side_output = self.side(x)\n side_output = side_output.view(side_output.size(0), -1)\n side_output = self.side_projection(side_output)\n\n if self.froze_enc and self.enc.training:\n self.enc.eval()\n x = self.enc(x) # batch_size x self.feat_dim\n\n if self.side is not None:\n alpha_squashed = torch.sigmoid(self.side_alpha)\n x = alpha_squashed * x + (1 - alpha_squashed) * side_output\n\n if return_feature:\n return x, x\n x = self.head(x)\n\n return x\n \n def forward_cls_layerwise(self, x):\n cls_embeds = self.enc.forward_cls_layerwise(x)\n return cls_embeds\n\n def get_features(self, x):\n \"\"\"get a (batch_size, self.feat_dim) feature\"\"\"\n x = self.enc(x) # batch_size x self.feat_dim\n return x"
},
{
"identifier": "Swin",
"path": "vpt_main/src/models/vit_models.py",
"snippet": "class Swin(ViT):\n \"\"\"Swin-related model.\"\"\"\n\n def __init__(self, cfg):\n super(Swin, self).__init__(cfg)\n\n def build_backbone(self, prompt_cfg, cfg, adapter_cfg, load_pretrain, vis):\n transfer_type = cfg.MODEL.TRANSFER_TYPE\n self.enc, self.feat_dim = build_swin_model(\n cfg.DATA.FEATURE, cfg.DATA.CROPSIZE,\n prompt_cfg, cfg.MODEL.MODEL_ROOT\n )\n\n # linear, prompt, cls, cls+prompt, partial_1\n if transfer_type == \"partial-1\":\n total_layer = len(self.enc.layers)\n total_blocks = len(self.enc.layers[-1].blocks)\n for k, p in self.enc.named_parameters():\n if \"layers.{}.blocks.{}\".format(total_layer - 1, total_blocks - 1) not in k and \"norm.weight\" != k and \"norm.bias\" != k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-2\":\n total_layer = len(self.enc.layers)\n for k, p in self.enc.named_parameters():\n if \"layers.{}\".format(total_layer - 1) not in k and \"norm.weight\" != k and \"norm.bias\" != k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-4\":\n total_layer = len(self.enc.layers)\n total_blocks = len(self.enc.layers[-2].blocks)\n\n for k, p in self.enc.named_parameters():\n if \"layers.{}\".format(total_layer - 1) not in k and \"layers.{}.blocks.{}\".format(total_layer - 2, total_blocks - 1) not in k and \"layers.{}.blocks.{}\".format(total_layer - 2, total_blocks - 2) not in k and \"layers.{}.downsample\".format(total_layer - 2) not in k and \"norm.weight\" != k and \"norm.bias\" != k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"linear\" or transfer_type == \"side\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"tinytl-bias\":\n for k, p in self.enc.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\" and prompt_cfg.LOCATION in [\"below\"]:\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"patch_embed\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt+bias\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"end2end\":\n logger.info(\"Enable all parameters update during training\")\n\n else:\n raise ValueError(\"transfer type {} is not supported\".format(\n transfer_type))"
},
{
"identifier": "SSLViT",
"path": "vpt_main/src/models/vit_models.py",
"snippet": "class SSLViT(ViT):\n \"\"\"moco-v3 and mae model.\"\"\"\n\n def __init__(self, cfg):\n super(SSLViT, self).__init__(cfg)\n\n def build_backbone(self, prompt_cfg, cfg, adapter_cfg, load_pretrain, vis):\n if \"moco\" in cfg.DATA.FEATURE:\n build_fn = build_mocov3_model\n elif \"mae\" in cfg.DATA.FEATURE:\n build_fn = build_mae_model\n\n self.enc, self.feat_dim = build_fn(\n cfg.DATA.FEATURE, cfg.DATA.CROPSIZE,\n prompt_cfg, cfg.MODEL.MODEL_ROOT, adapter_cfg=adapter_cfg\n )\n\n transfer_type = cfg.MODEL.TRANSFER_TYPE\n # linear, prompt, cls, cls+prompt, partial_1\n if transfer_type == \"partial-1\":\n total_layer = len(self.enc.blocks)\n for k, p in self.enc.named_parameters():\n if \"blocks.{}\".format(total_layer - 1) not in k and \"fc_norm\" not in k and k != \"norm\": # noqa\n p.requires_grad = False\n elif transfer_type == \"partial-2\":\n total_layer = len(self.enc.blocks)\n for k, p in self.enc.named_parameters():\n if \"blocks.{}\".format(total_layer - 1) not in k and \"blocks.{}\".format(total_layer - 2) not in k and \"fc_norm\" not in k and k != \"norm\": # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-4\":\n total_layer = len(self.enc.blocks)\n for k, p in self.enc.named_parameters():\n if \"blocks.{}\".format(total_layer - 1) not in k and \"blocks.{}\".format(total_layer - 2) not in k and \"blocks.{}\".format(total_layer - 3) not in k and \"blocks.{}\".format(total_layer - 4) not in k and \"fc_norm\" not in k and k != \"norm\": # noqa\n p.requires_grad = False\n\n elif transfer_type == \"linear\" or transfer_type == \"sidetune\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"tinytl-bias\":\n for k, p in self.enc.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt+bias\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\" and prompt_cfg.LOCATION == \"below\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"patch_embed.proj.weight\" not in k and \"patch_embed.proj.bias\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"end2end\":\n logger.info(\"Enable all parameters update during training\")\n \n # adapter\n elif transfer_type == \"adapter\":\n for k, p in self.enc.named_parameters():\n if \"adapter\" not in k:\n p.requires_grad = False\n\n else:\n raise ValueError(\"transfer type {} is not supported\".format(\n transfer_type))"
},
{
"identifier": "logging",
"path": "vpt_main/src/utils/logging.py",
"snippet": "_FORMAT = \"[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s\"\ndef _suppress_print():\n def print_pass(*objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\ndef _cached_log_stream(filename):\ndef setup_logging(\n num_gpu, num_shards, output=\"\", name=\"visual_prompt\", color=True):\ndef setup_single_logging(name, output=\"\"):\ndef get_logger(name):\ndef log_json_stats(stats, sort_keys=True):\n def __init__(self, *args, **kwargs):\n def formatMessage(self, record: logging.LogRecord) -> str:\nclass _ColorfulFormatter(logging.Formatter):"
}
] | from tabnanny import verbose
from .resnet import ResNet
from .convnext import ConvNeXt
from .vit_models import ViT, Swin, SSLViT
from ..utils import logging
import torch | 10,118 | #!/usr/bin/env python3
"""
Model construction functions.
"""
logger = logging.get_logger("visual_prompt")
# Supported model types
_MODEL_TYPES = {
"resnet": ResNet,
"convnext": ConvNeXt,
"vit": ViT,
| #!/usr/bin/env python3
"""
Model construction functions.
"""
logger = logging.get_logger("visual_prompt")
# Supported model types
_MODEL_TYPES = {
"resnet": ResNet,
"convnext": ConvNeXt,
"vit": ViT, | "swin": Swin, | 3 | 2023-12-12 13:19:17+00:00 | 12k |
KULL-Centre/_2023_Blaabjerg_SSEmb | src/models/msa_transformer/modules.py | [
{
"identifier": "MultiheadAttention",
"path": "src/models/msa_transformer/multihead_attention.py",
"snippet": "class MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n kdim=None,\n vdim=None,\n dropout=0.0,\n bias=True,\n add_bias_kv: bool = False,\n add_zero_attn: bool = False,\n self_attention: bool = False,\n encoder_decoder_attention: bool = False,\n use_rotary_embeddings: bool = False,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim**-0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n\n assert not self.self_attention or self.qkv_same_dim, (\n \"Self-attention requires query, key and \" \"value to be of the same size\"\n )\n\n self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n self.rot_emb = None\n if use_rotary_embeddings:\n self.rot_emb = RotaryEmbedding(dim=self.head_dim)\n\n self.enable_torch_version = False\n if hasattr(F, \"multi_head_attention_forward\"):\n self.enable_torch_version = True\n else:\n self.enable_torch_version = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.0)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n value: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n need_weights: bool = True,\n static_kv: bool = False,\n attn_mask: Optional[Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement 
causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if (\n not self.rot_emb\n and self.enable_torch_version\n and not self.onnx_trace\n and incremental_state is None\n and not static_kv\n # A workaround for quantization to work. Otherwise JIT compilation\n # treats bias in linear module as method.\n and not torch.jit.is_scripting()\n and not need_head_weights\n ):\n assert key is not None and value is not None\n return F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if saved_state is not None and \"prev_key\" in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n if self.self_attention:\n q = self.q_proj(query)\n k = self.k_proj(query)\n v = self.v_proj(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.q_proj(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n\n else:\n assert key is not None and value is not None\n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n q *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n key_padding_mask.new_zeros(key_padding_mask.size(0), 1),\n ],\n dim=1,\n )\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if \"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n 
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n prev_key_padding_mask: Optional[Tensor] = None\n if \"prev_key_padding_mask\" in saved_state:\n prev_key_padding_mask = saved_state[\"prev_key_padding_mask\"]\n assert k is not None and v is not None\n key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(\n key_padding_mask=key_padding_mask,\n prev_key_padding_mask=prev_key_padding_mask,\n batch_size=bsz,\n src_len=k.size(1),\n static_kv=static_kv,\n )\n\n saved_state[\"prev_key\"] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_value\"] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_key_padding_mask\"] = key_padding_mask\n # In this branch incremental_state is never None\n assert incremental_state is not None\n incremental_state = self._set_input_buffer(incremental_state, saved_state)\n assert k is not None\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n assert v is not None\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask),\n ],\n dim=1,\n )\n\n if self.rot_emb:\n q, k = self.rot_emb(q, k)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float(\"-inf\")\n )\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n\n attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace)\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = F.dropout(\n attn_weights_float.type_as(attn_weights),\n p=self.dropout,\n training=self.training,\n )\n assert v is not None\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if self.onnx_trace and attn.size(1) == 1:\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n attn_weights: Optional[Tensor] = None\n if need_weights:\n attn_weights = attn_weights_float.view(\n bsz, 
self.num_heads, tgt_len, src_len\n ).type_as(attn).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n\n return attn, attn_weights\n\n @staticmethod\n def _append_prev_key_padding_mask(\n key_padding_mask: Optional[Tensor],\n prev_key_padding_mask: Optional[Tensor],\n batch_size: int,\n src_len: int,\n static_kv: bool,\n ) -> Optional[Tensor]:\n # saved key padding masks have shape (bsz, seq_len)\n if prev_key_padding_mask is not None and static_kv:\n new_key_padding_mask = prev_key_padding_mask\n elif prev_key_padding_mask is not None and key_padding_mask is not None:\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1\n )\n # During incremental decoding, as the padding token enters and\n # leaves the frame, there will be a time when prev or current\n # is None\n elif prev_key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - prev_key_padding_mask.size(1)),\n device=prev_key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), filler.float()], dim=1\n )\n elif key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - key_padding_mask.size(1)),\n device=key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)\n else:\n new_key_padding_mask = prev_key_padding_mask\n return new_key_padding_mask\n\n @torch.jit.export\n def reorder_incremental_state(\n self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor\n ):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer_k = input_buffer[k]\n if input_buffer_k is not None:\n if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(\n 0\n ):\n break\n input_buffer[k] = input_buffer_k.index_select(0, new_order)\n incremental_state = self._set_input_buffer(incremental_state, input_buffer)\n return incremental_state\n\n def _get_input_buffer(\n self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]\n ) -> Dict[str, Optional[Tensor]]:\n result = self.get_incremental_state(incremental_state, \"attn_state\")\n if result is not None:\n return result\n else:\n empty_result: Dict[str, Optional[Tensor]] = {}\n return empty_result\n\n def _set_input_buffer(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n buffer: Dict[str, Optional[Tensor]],\n ):\n return self.set_incremental_state(incremental_state, \"attn_state\", buffer)\n\n def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):\n return attn_weights\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n items_to_add = {}\n keys_to_remove = []\n for k in state_dict.keys():\n if k.endswith(prefix + \"in_proj_weight\"):\n # in_proj_weight used to be q + k + v with same dimensions\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.weight\"] = state_dict[k][:dim]\n items_to_add[prefix + \"k_proj.weight\"] = state_dict[k][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.weight\"] = state_dict[k][2 * dim :]\n\n keys_to_remove.append(k)\n\n k_bias = prefix + \"in_proj_bias\"\n if k_bias in state_dict.keys():\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.bias\"] = 
state_dict[k_bias][:dim]\n items_to_add[prefix + \"k_proj.bias\"] = state_dict[k_bias][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.bias\"] = state_dict[k_bias][2 * dim :]\n\n keys_to_remove.append(prefix + \"in_proj_bias\")\n\n for k in keys_to_remove:\n del state_dict[k]\n\n for key, value in items_to_add.items():\n state_dict[key] = value"
},
{
"identifier": "ColumnSelfAttention",
"path": "src/models/msa_transformer/axial_attention.py",
"snippet": "class ColumnSelfAttention(nn.Module):\n \"\"\"Compute self-attention over columns of a 2D input.\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n max_tokens_per_msa: int = 2 ** 16,\n ):\n super().__init__()\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n self.scaling = self.head_dim ** -0.5\n self.max_tokens_per_msa = max_tokens_per_msa\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n self.dropout_module = nn.Dropout(dropout)\n\n def _batched_forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n max_cols = max(1, self.max_tokens_per_msa // num_rows)\n outputs = []\n attns = []\n for start in range(0, num_cols, max_cols):\n output, attn = self(\n x[:, start : start + max_cols],\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask[:, :, start : start + max_cols]\n if self_attn_padding_mask is not None\n else None,\n )\n outputs.append(output)\n attns.append(attn)\n output = torch.cat(outputs, 1)\n attns = torch.cat(attns, 1)\n return output, attns\n\n def compute_attention_update(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n if num_rows == 1:\n # if there is only 1 position, this is equivalent and doesn't break with padding\n attn_probs = torch.ones(\n self.num_heads,\n num_cols,\n batch_size,\n num_rows,\n num_rows,\n device=x.device,\n dtype=x.dtype,\n )\n output = self.out_proj(self.v_proj(x))\n else:\n q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n q *= self.scaling\n\n attn_weights = torch.einsum(\"icnhd,jcnhd->hcnij\", q, k)\n\n if self_attn_mask is not None:\n raise NotImplementedError\n if self_attn_padding_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_padding_mask.permute(2, 0, 1).unsqueeze(0).unsqueeze(3),\n -10000,\n )\n\n attn_probs = attn_weights.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n context = torch.einsum(\"hcnij,jcnhd->icnhd\", attn_probs, v)\n context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)\n output = self.out_proj(context)\n return output, attn_probs\n\n def forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n # if False and num_rows * num_cols > 2 ** 14 and not torch.is_grad_enabled():\n if (num_rows * num_cols) > self.max_tokens_per_msa and not torch.is_grad_enabled():\n return self._batched_forward(\n x,\n self_attn_mask,\n self_attn_padding_mask,\n )\n else:\n return self.compute_attention_update(x, self_attn_mask, self_attn_padding_mask)"
},
{
"identifier": "RowSelfAttention",
"path": "src/models/msa_transformer/axial_attention.py",
"snippet": "class RowSelfAttention(nn.Module):\n \"\"\"Compute self-attention over rows of a 2D input.\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n max_tokens_per_msa: int = 2 ** 16,\n ):\n super().__init__()\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n self.scaling = self.head_dim ** -0.5\n self.max_tokens_per_msa = max_tokens_per_msa\n self.attn_shape = \"hnij\"\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n self.dropout_module = nn.Dropout(dropout)\n\n def align_scaling(self, q):\n num_rows = q.size(0)\n return self.scaling / math.sqrt(num_rows)\n\n def _batched_forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n max_rows = max(1, self.max_tokens_per_msa // num_cols)\n attns = 0\n scaling = self.align_scaling(x)\n for start in range(0, num_rows, max_rows):\n attn_weights = self.compute_attention_weights(\n x[start : start + max_rows],\n scaling,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask[:, start : start + max_rows]\n if self_attn_padding_mask is not None\n else None,\n )\n attns += attn_weights\n attn_probs = attns.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n\n outputs = []\n for start in range(0, num_rows, max_rows):\n output = self.compute_attention_update(x[start : start + max_rows], attn_probs)\n outputs.append(output)\n\n output = torch.cat(outputs, 0)\n return output, attn_probs\n\n def compute_attention_weights(\n self,\n x,\n scaling: float,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n q *= scaling\n if self_attn_padding_mask is not None:\n # Zero out any padded aligned positions - this is important since\n # we take a sum across the alignment axis.\n q *= 1 - self_attn_padding_mask.permute(1, 2, 0).unsqueeze(3).unsqueeze(4).to(q)\n\n attn_weights = torch.einsum(f\"rinhd,rjnhd->{self.attn_shape}\", q, k)\n\n if self_attn_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_mask.unsqueeze(0).unsqueeze(0),\n -10000,\n )\n\n if self_attn_padding_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_padding_mask[:, 0].unsqueeze(0).unsqueeze(2),\n -10000,\n )\n\n return attn_weights\n\n def compute_attention_update(\n self,\n x,\n attn_probs,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n context = torch.einsum(f\"{self.attn_shape},rjnhd->rinhd\", attn_probs, v)\n context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)\n output = self.out_proj(context)\n return output\n\n def forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n if (num_rows * num_cols > self.max_tokens_per_msa) and not torch.is_grad_enabled():\n return self._batched_forward(x, self_attn_mask, self_attn_padding_mask)\n else:\n scaling = self.align_scaling(x)\n attn_weights = self.compute_attention_weights(\n x, scaling, self_attn_mask, 
self_attn_padding_mask\n )\n attn_probs = attn_weights.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n output = self.compute_attention_update(x, attn_probs)\n return output, attn_probs"
}
] | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .multihead_attention import MultiheadAttention # noqa
from .axial_attention import ColumnSelfAttention, RowSelfAttention
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
from torch.nn import LayerNorm as ESM1bLayerNorm | 7,964 |
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = (self.weight * x) + self.bias
return x
try:
class ESM1bLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
class TransformerLayer(nn.Module):
"""Transformer layer block."""
def __init__(
self,
embed_dim,
ffn_embed_dim,
attention_heads,
add_bias_kv=True,
use_esm1b_layer_norm=False,
use_rotary_embeddings: bool = False,
):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self.use_rotary_embeddings = use_rotary_embeddings
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm
self.self_attn = MultiheadAttention(
self.embed_dim,
self.attention_heads,
add_bias_kv=add_bias_kv,
add_zero_attn=False,
use_rotary_embeddings=self.use_rotary_embeddings,
)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(
self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False
):
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=True,
need_head_weights=need_head_weights,
attn_mask=self_attn_mask,
)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = gelu(self.fc1(x))
x = self.fc2(x)
x = residual + x
return x, attn
class AxialTransformerLayer(nn.Module):
"""Implements an Axial MSA Transformer block."""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_tokens_per_msa: int = 2**14,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout_prob = dropout
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def symmetrize(x):
"Make layer symmetric in final two dimensions, used for contact prediction."
return x + x.transpose(-1, -2)
def apc(x):
"Perform average product correct, used for contact prediction."
a1 = x.sum(-1, keepdims=True)
a2 = x.sum(-2, keepdims=True)
a12 = x.sum((-1, -2), keepdims=True)
avg = a1 * a2
avg.div_(a12) # in-place to reduce memory
normalized = x - avg
return normalized
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = (self.weight * x) + self.bias
return x
try:
class ESM1bLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
class TransformerLayer(nn.Module):
"""Transformer layer block."""
def __init__(
self,
embed_dim,
ffn_embed_dim,
attention_heads,
add_bias_kv=True,
use_esm1b_layer_norm=False,
use_rotary_embeddings: bool = False,
):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self.use_rotary_embeddings = use_rotary_embeddings
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm
self.self_attn = MultiheadAttention(
self.embed_dim,
self.attention_heads,
add_bias_kv=add_bias_kv,
add_zero_attn=False,
use_rotary_embeddings=self.use_rotary_embeddings,
)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(
self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False
):
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=True,
need_head_weights=need_head_weights,
attn_mask=self_attn_mask,
)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = gelu(self.fc1(x))
x = self.fc2(x)
x = residual + x
return x, attn
class AxialTransformerLayer(nn.Module):
"""Implements an Axial MSA Transformer block."""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_tokens_per_msa: int = 2**14,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout_prob = dropout
| row_self_attention = RowSelfAttention( | 2 | 2023-12-09 11:42:34+00:00 | 12k |
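[Editor's illustration] The row above asks for a completion inside AxialTransformerLayer.__init__, and its recorded gold next line is `row_self_attention = RowSelfAttention(`. As a quick, hedged sketch of what that completion plausibly builds toward, the following Python fragment instantiates the two axial-attention submodules using only the constructor signatures shown in this row's context (RowSelfAttention and ColumnSelfAttention). The hyperparameter values simply mirror AxialTransformerLayer's defaults from the row, and the import path is an assumption; this is not necessarily the repository's exact continuation.

# Sketch only: assumes the row's package layout so this import resolves.
from src.models.msa_transformer.axial_attention import RowSelfAttention, ColumnSelfAttention

embedding_dim = 768          # AxialTransformerLayer default
num_attention_heads = 8      # AxialTransformerLayer default
attention_dropout = 0.1      # passed as `dropout` to the attention modules
max_tokens_per_msa = 2**14   # AxialTransformerLayer default

# Self-attention over rows of the 2D (MSA) input, per the RowSelfAttention docstring.
row_self_attention = RowSelfAttention(
    embedding_dim,
    num_attention_heads,
    dropout=attention_dropout,
    max_tokens_per_msa=max_tokens_per_msa,
)

# Self-attention over columns of the 2D (MSA) input, per the ColumnSelfAttention docstring.
column_self_attention = ColumnSelfAttention(
    embedding_dim,
    num_attention_heads,
    dropout=attention_dropout,
    max_tokens_per_msa=max_tokens_per_msa,
)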
ChatClue/ChatClue | osiris.py | [
{
"identifier": "OSHelper",
"path": "utils/os/helpers.py",
"snippet": "class OSHelper:\n \"\"\"\n Provides utility methods for operating system level operations, particularly file management.\n\n This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.\n \"\"\"\n\n @staticmethod\n def find_closest_image(directory, target_time):\n \"\"\"\n Finds the closest image file in a directory based on the target time.\n\n This function searches through all JPG files in the specified directory and \n selects the one whose creation time is closest to, but not earlier than, \n the target time.\n\n Args:\n directory (str): The directory path where the image files are stored.\n target_time (float): The target time (in seconds since epoch) to compare the file creation times against.\n\n Returns:\n str: The path of the closest image file. Returns None if no suitable file is found.\n \"\"\"\n closest_file = None\n closest_time_diff = None\n\n # Iterate over each file in the specified directory\n for filename in os.listdir(directory):\n if filename.lower().endswith(\".jpg\"): # Check if the file is a JPG image\n filepath = os.path.join(directory, filename)\n filetime = os.path.getmtime(filepath) # Get the modification time of the file\n # Check if the file's time is later than the target time and if it's the closest so far\n if filetime > target_time:\n logging.info(f\"File is close: {filepath} - Time: {filetime}\")\n time_diff = filetime - target_time\n if closest_time_diff is None or time_diff < closest_time_diff:\n closest_file = filepath\n closest_time_diff = time_diff\n return closest_file\n\n @staticmethod\n def convert_image_to_base64(filepath):\n \"\"\"\n Converts an image file to a Base64 encoded string.\n\n This function reads the image file from the given filepath, encodes it in Base64,\n and then decodes it to a UTF-8 string, which can be easily used for data transfer \n or embedding in web pages.\n\n Args:\n filepath (str): The path of the image file to be converted.\n\n Returns:\n str: The Base64 encoded string of the image.\n \"\"\"\n with open(filepath, \"rb\") as image_file:\n # Read the file and encode it in Base64\n return base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n @staticmethod\n def clear_orphaned_audio_files():\n \"\"\"\n Removes all audio files in a specific directory.\n\n This method is used to clear out any leftover audio files in the 'tmp/audio' directory. \n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for audio files\n directory_path = 'tmp/audio'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n \n @staticmethod\n def clear_orphaned_video_files():\n \"\"\"\n Removes all video files in a specific directory.\n\n This method is used to clear out any leftover video files in the 'tmp/video' directory. 
\n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for video files\n directory_path = 'tmp/video'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n\n @staticmethod\n def system_file_cleanup():\n \"\"\"\n Performs a general cleanup of system files.\n\n Currently, this method focuses on clearing orphaned audio files but can be expanded to include other cleanup tasks.\n \"\"\"\n # Clear orphaned audio files\n OSHelper.clear_orphaned_audio_files()\n OSHelper.clear_orphaned_video_files()\n \n @staticmethod\n def configure_tmp_directories():\n \"\"\"\n Ensures that the required directories (tmp/audio and tmp/video) exist.\n Creates them if they do not exist.\n \"\"\"\n directories = ['tmp/audio', 'tmp/video']\n for directory in directories:\n os.makedirs(directory, exist_ok=True)\n logging.info(f\"Checked and ensured directory exists: {directory}\")"
},
{
"identifier": "get_celery_app",
"path": "celery_config.py",
"snippet": "def get_celery_app():\n return celery_app"
},
{
"identifier": "DatabaseSetup",
"path": "database/setup.py",
"snippet": "class DatabaseSetup:\n \"\"\"\n This class is responsible for database setup tasks, particularly\n for ensuring that all defined tables in SQLAlchemy models are created in the database.\n \"\"\"\n\n @staticmethod\n def initial_setup():\n \"\"\"\n Creates tables in the database based on the SQLAlchemy models.\n\n This method uses the SQLAlchemy engine to connect to the database and creates\n any tables that haven't been created yet as defined in the SQLAlchemy model classes.\n It's intended to be run during the initial setup phase of the application.\n \"\"\"\n\n # Obtain the SQLAlchemy engine\n engine = get_engine()\n\n # Ensure vector extension is enabled.\n with engine.begin() as connection:\n # Create extension 'pgvector' if it is not created yet\n # Remember, you may need to install pgvector on your system before this will work properly.\n # https://github.com/pgvector/pgvector.git for instructions.\n connection.execute(text(\"CREATE EXTENSION IF NOT EXISTS vector\"))\n\n # Create all tables in the database defined in the SQLAlchemy models\n # This will have no effect on existing tables that match the model definitions\n Base.metadata.create_all(engine)"
},
{
"identifier": "broadcaster",
"path": "broadcast/broadcaster.py",
"snippet": "class Broadcaster:\n def __init__(self):\n def send_message(self, message):\n def start(self):\n def shutdown(self):"
},
{
"identifier": "AudioProcessor",
"path": "audio/audio_processor.py",
"snippet": "class AudioProcessor:\n \"\"\"\n A class to handle audio processing, including capturing audio input, \n processing it with Vosk for speech recognition, and responding using OpenAI's GPT model.\n\n Attributes:\n model (Vosk.Model): Vosk speech recognition model.\n samplerate (int): The sample rate for audio capture.\n device (str): The name of the audio input device.\n blocksize (int): The block size for audio processing.\n dump_filename (str): Filename to dump the audio input, if provided.\n \"\"\"\n\n def __init__(self):\n self.model = Model(lang=AUDIO_SETTINGS.get('VOSK_MODEL', \"en-us\"))\n self.samplerate = AUDIO_SETTINGS.get('SOUND_DEVICE_SAMPLERATE')\n self.device = AUDIO_SETTINGS.get('SOUND_DEVICE_DEVICE')\n self.blocksize = AUDIO_SETTINGS.get('SOUND_DEVICE_BLOCK_SIZE', 28000)\n self.dump_filename = AUDIO_SETTINGS.get('AUDIO_IN_DUMP_FILENAME')\n self.audio_queue = queue.Queue()\n self.openai_client = OpenAIClient()\n self.openai_conversation_builder = OpenAIConversationBuilder()\n self.tool_processor = ToolProcessor()\n self.broadcaster = broadcaster\n self.audio_out = get_audio_out()\n self.audio_out_response_buffer = ''\n self.full_assistant_response = ''\n self.last_wake_time = 0\n self.last_response_end_time = 0\n self.processing_openai_request = False\n self.shutdown_event = threading.Event()\n\n def open_dump_file(self):\n \"\"\"Opens the file to dump audio input if a filename is provided.\"\"\"\n if self.dump_filename is not None:\n self.dump_filename = open(self.dump_filename, \"wb\")\n\n def close_dump_file(self):\n \"\"\"Closes the audio dump file if it was opened.\"\"\"\n if self.dump_filename is not None:\n self.dump_filename.close()\n\n def should_process(self, result, current_time):\n \"\"\"\n Determines whether the robot should process the input based on wake phrases or elapsed time.\n\n Args:\n result (str): The recognized text from the audio input.\n current_time (float): The current time in seconds.\n\n Returns:\n bool: True if the input should be processed, False otherwise.\n \"\"\"\n return (not contains_quiet_please_phrase(result) and contains_wake_phrase(result)) or \\\n (not contains_quiet_please_phrase(result) and (current_time - self.last_wake_time <= 10) or (current_time - self.last_response_end_time <= 10) and not self.audio_out.is_playing) \\\n\n def update_wake_time(self):\n \"\"\"Updates the time when a wake phrase was last heard.\"\"\"\n self.last_wake_time = time.time()\n self.save_system_state()\n\n def update_response_end_time(self):\n \"\"\"Updates the time when the robot's last response ended.\"\"\"\n self.last_response_end_time = time.time()\n\n def callback(self, indata, frames, time, status):\n \"\"\"\n Callback function for audio input stream.\n\n Args:\n indata: The buffer containing the incoming sound.\n frames: The number of frames.\n time: Current stream time.\n status: Status of the stream.\n \"\"\"\n if status:\n logging.warning(status)\n self.audio_queue.put(bytes(indata))\n\n def process_stream(self):\n \"\"\"\n Processes the audio stream by recognizing speech and generating responses.\n\n Continuously captures audio, performs speech recognition, and generates responses using OpenAI.\n \"\"\"\n self.open_dump_file()\n try:\n with sd.RawInputStream(samplerate=self.samplerate, blocksize=self.blocksize, device=self.device,\n dtype=\"int16\", channels=1, callback=self.callback):\n rec = KaldiRecognizer(self.model, self.samplerate)\n openai_stream_thread = None\n\n while not self.shutdown_event.is_set():\n data, current_time = 
self.get_audio_data()\n result = self.process_recognition(data, rec)\n\n if result:\n openai_stream_thread = self.handle_speech(result, openai_stream_thread, current_time)\n\n self.handle_partial_results(rec)\n self.write_to_dump_file(data)\n self.process_openai_response()\n\n # except Exception as e:\n # logging.error(f\"An error occurred: {e}\")\n finally:\n self.close_dump_file()\n\n def get_audio_data(self):\n \"\"\"\n Retrieves audio data from the queue.\n\n Returns:\n tuple: A tuple containing the audio data and the current time.\n \"\"\"\n data = self.audio_queue.get()\n current_time = time.time()\n return data, current_time\n\n def process_recognition(self, data, rec):\n \"\"\"\n Processes the recognition of speech from audio data.\n\n Args:\n data: The audio data to be processed.\n rec (KaldiRecognizer): The Vosk recognizer instance.\n\n Returns:\n str or None: Recognized text or None if no significant speech is recognized.\n \"\"\"\n if rec.AcceptWaveform(data):\n result = json.loads(rec.Result())[\"text\"]\n if result not in ['', 'huh']:\n self.broadcaster.send_message(result)\n logging.info(\"ROBOT HEARD: \" + result)\n return result\n return None\n\n def handle_speech(self, result, openai_stream_thread, current_time):\n \"\"\"\n Processes the recognized speech and determines the appropriate response.\n\n Args:\n result (str): Recognized speech text.\n openai_stream_thread (threading.Thread): The current OpenAI stream thread.\n current_time (float): Current time in seconds.\n\n Returns:\n threading.Thread: Updated or new OpenAI stream thread.\n \"\"\"\n try:\n if self.should_process(result, current_time) and not self.processing_openai_request:\n self.update_wake_time()\n self.processing_openai_request = True\n if not openai_stream_thread or not openai_stream_thread.is_alive():\n self.openai_client.stop_signal.clear()\n is_tool_request, conversation = self.determine_tool_request(result)\n if is_tool_request:\n self.handle_tool_request(result, conversation)\n else:\n self.continue_conversation(result, conversation)\n else:\n logging.info(\"ROBOT THOUGHT: Ignoring Conversation, it doesn't appear to be relevant.\")\n finally:\n self.processing_openai_request = False\n return openai_stream_thread\n \n \n def determine_tool_request(self, result):\n \"\"\"\n Determines whether the given input text is a tool request.\n\n Args:\n result (str): The recognized text to evaluate.\n\n Returns:\n Tuple[bool, list]: A tuple containing a boolean indicating whether it's a tool request, \n and the conversation array for further processing.\n \"\"\"\n call_type_messages = self.openai_conversation_builder.create_check_if_tool_call_messages(result)\n openai_is_tool_response = self.openai_client.create_completion(call_type_messages, False, {\"type\": \"json_object\"}, openai_functions, True)\n \n is_tool_request = False\n conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)\n\n try:\n if openai_is_tool_response and openai_is_tool_response.choices:\n is_tool_request = json.loads(openai_is_tool_response.choices[0].message.content).get(\"is_tool\", False)\n except (TypeError, AttributeError, json.JSONDecodeError):\n print(\"Error parsing OpenAI response or response not in expected format.\")\n\n return is_tool_request, conversation\n\n def handle_tool_request(self, result, conversation):\n \"\"\"\n Handles the processing of a tool request.\n\n Args:\n result (str): The recognized text.\n conversation (list): The conversation array built up to this point.\n 
\"\"\"\n tool_response = self.openai_client.create_completion(conversation, False, None, openai_functions)\n tool_response_message = tool_response.choices[0].message \n tool_calls = tool_response_message.tool_calls \n if tool_calls:\n self.process_tool_calls(tool_calls, result, conversation, tool_response_message)\n else:\n self.continue_conversation(result, conversation)\n\n def process_tool_calls(self, tool_calls, result, conversation, tool_response_message):\n \"\"\"\n Processes the tool calls received from OpenAI.\n\n Args:\n tool_calls (list): List of tool calls from OpenAI response.\n result (str): The recognized text.\n conversation (list): The conversation array.\n tool_response_message (Message): The tool response message from OpenAI.\n \"\"\"\n tool_call = tool_calls[0]\n tool_processor_response = self.tool_processor.process_tool_request(tool_call)\n if tool_processor_response[\"success\"]:\n self.handle_successful_tool_response(tool_processor_response, result, conversation, tool_response_message)\n else:\n self.audio_out.add_to_queue(get_tool_not_found_phrase())\n\n def handle_successful_tool_response(self, tool_processor_response, result, conversation, tool_response_message):\n \"\"\"\n Handles a successful tool response.\n\n Args:\n tool_processor_response (dict): The response from the tool processor.\n result (str): The recognized text.\n conversation (list): The conversation array.\n tool_response_message (Message): The tool response message from OpenAI.\n \"\"\"\n if tool_processor_response[\"is_conversational\"]:\n conversation.append(tool_response_message)\n tool_call_response_message = self.openai_conversation_builder.create_tool_call_response_message(tool_processor_response)\n conversation.append(tool_call_response_message)\n openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))\n openai_stream_thread.start()\n else:\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"user\"], response=result)\n\n def continue_conversation(self, result, conversation):\n \"\"\"\n Continues the conversation with OpenAI based on the given result.\n\n Args:\n result (str): The recognized text to continue the conversation with.\n conversation (list): The existing conversation array.\n \"\"\"\n self.openai_client.stop_processing_request()\n conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)\n openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))\n openai_stream_thread.start()\n logging.info(\"ROBOT ACTION: Committing user input to memory.\")\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"user\"], response=result)\n\n\n def handle_partial_results(self, rec):\n \"\"\"\n Handles partial results from speech recognition.\n\n Args:\n rec (KaldiRecognizer): The Vosk recognizer instance.\n \"\"\"\n partial_result_json = json.loads(rec.PartialResult())\n if 'partial' in partial_result_json and contains_quiet_please_phrase(partial_result_json['partial']):\n self.stop_conversation_and_audio()\n\n def stop_conversation_and_audio(self):\n \"\"\"\n Stops the conversation and any ongoing audio processing.\n \"\"\"\n logging.info(\"ROBOT THOUGHT: Request to stop talking recognized. 
Stopping stream.\")\n self.stop_all_audio()\n if self.full_assistant_response:\n logging.info(\"ROBOT ACTION: Committing my partial response to memory\")\n self.store_full_assistant_response()\n\n def stop_all_audio(self):\n self.audio_out_response_buffer = ''\n self.openai_client.stop_processing_request()\n self.audio_out.stop_all_audio()\n\n def write_to_dump_file(self, data):\n \"\"\"\n Writes audio data to the dump file if it's open.\n\n Args:\n data: The audio data to be written to the file.\n \"\"\"\n if self.dump_filename is not None:\n self.dump_filename.write(data)\n\n def process_openai_response(self):\n \"\"\"\n Processes responses from OpenAI's GPT model.\n\n Retrieves and handles the responses generated by OpenAI.\n \"\"\"\n while not self.openai_client.response_queue.empty():\n chunk = self.openai_client.response_queue.get()\n if chunk.choices[0].delta.content is not None:\n response_text = chunk.choices[0].delta.content\n print(response_text, end='', flush=True)\n self.update_response_end_time()\n self.audio_out_response_buffer += response_text\n if self.audio_out_response_buffer.endswith(('.', '?', '!', ';')):\n self.audio_out.add_to_queue(self.audio_out_response_buffer)\n self.audio_out_response_buffer = \"\"\n self.full_assistant_response += response_text\n\n if self.full_assistant_response and self.openai_client.streaming_complete:\n logging.info(\"ROBOT ACTION: Committing my full response to memory\")\n self.store_full_assistant_response()\n\n def store_full_assistant_response(self):\n \"\"\"\n Stores the full assistant response in the database.\n \"\"\"\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"assistant\"], response=self.full_assistant_response)\n self.full_assistant_response = ''\n\n def store_conversation(self, speaker_type, response):\n \"\"\"\n Stores the conversation part in the database asynchronously using a Celery task.\n\n Args:\n speakerType (str): \"user\" or \"assistant\", indicating who is speaking.\n response (str): The text of the response.\n \"\"\"\n get_celery_app().send_task('background.memory.tasks.store_conversation_task', args=[speaker_type, response])\n logging.info(\"Store conversation task submitted to background\")\n \n def save_system_state(self):\n \"\"\"\n Saves the system state in the database asynchronously using a Celery task.\n \"\"\"\n get_celery_app().send_task('background.memory.tasks.update_system_state_task', args=[self.last_wake_time])\n logging.info(\"Update system state task submitted to background\")\n\n def shutdown(self):\n self.shutdown_event.set()"
},
{
"identifier": "VideoProcessor",
"path": "video/video_processor.py",
"snippet": "class VideoProcessor:\n \"\"\"\n A class to handle video processing, including capturing video input and \n processing it with MediaPipe for pose estimation.\n \"\"\"\n\n def __init__(self):\n # MediaPipe Pose solution initialization\n self.mp_pose = mp.solutions.pose\n self.pose = self.mp_pose.Pose()\n self.cap = None\n\n # Video capture settings\n self.frame_rate = VIDEO_SETTINGS.get('FRAME_RATE', 30)\n self.device = VIDEO_SETTINGS.get('VIDEO_DEVICE', 0)\n self.capture_interval = VIDEO_SETTINGS.get('CAPTURE_INTERVAL', 1)\n self.frame_counter = 0\n self.last_capture_time = time.time()\n self.frame_queue = queue.Queue()\n\n # Check and create tmp directory for storing frames\n self.tmp_folder = 'tmp/video'\n if not os.path.exists(self.tmp_folder):\n os.makedirs(self.tmp_folder)\n\n self.shutdown_event = threading.Event()\n\n def process_stream(self):\n \"\"\"\n Captures and processes the video stream.\n \"\"\"\n if VIDEO_SETTINGS.get('CAPTURE_VIDEO', False):\n self.cap = cv2.VideoCapture(self.device)\n\n while not self.shutdown_event.is_set():\n ret, frame = self.cap.read()\n if not ret:\n continue\n\n # Process the frame\n #self.process_frame(frame)\n\n # Capture frames at a set interval for saving\n if time.time() - self.last_capture_time > self.capture_interval:\n frame_name = os.path.join(self.tmp_folder, f\"frame_{self.frame_counter}.jpg\")\n cv2.imwrite(frame_name, frame)\n logging.debug(f\"Frame saved as {frame_name}\")\n self.frame_counter += 1\n self.last_capture_time = time.time()\n\n self.clean_up()\n \n def clean_up(self):\n \"\"\"\n Releases resources and closes windows.\n \"\"\"\n if self.cap:\n self.cap.release()\n cv2.destroyAllWindows()\n OSHelper.clear_orphaned_video_files()\n\n def process_frame(self, frame):\n \"\"\"\n Processes a single video frame.\n \"\"\"\n self.frame_queue.put(frame)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n results = self.pose.process(frame_rgb)\n\n if results.pose_landmarks:\n # Draw pose landmarks\n mp.solutions.drawing_utils.draw_landmarks(frame, results.pose_landmarks, self.mp_pose.POSE_CONNECTIONS)\n # Additional processing can be added here\n \n def shutdown(self):\n \"\"\"\n Signals the thread to terminate.\n \"\"\"\n self.shutdown_event.set()"
},
{
"identifier": "get_audio_out",
"path": "audio/audio_out.py",
"snippet": "def get_audio_out():\n \"\"\"\n Returns the instance of AudioOutput for use.\n\n Returns:\n AudioOutput: The instance of the AudioOutput class.\n \"\"\"\n return audio_out"
},
{
"identifier": "OSHelper",
"path": "utils/os/helpers.py",
"snippet": "class OSHelper:\n \"\"\"\n Provides utility methods for operating system level operations, particularly file management.\n\n This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.\n \"\"\"\n\n @staticmethod\n def find_closest_image(directory, target_time):\n \"\"\"\n Finds the closest image file in a directory based on the target time.\n\n This function searches through all JPG files in the specified directory and \n selects the one whose creation time is closest to, but not earlier than, \n the target time.\n\n Args:\n directory (str): The directory path where the image files are stored.\n target_time (float): The target time (in seconds since epoch) to compare the file creation times against.\n\n Returns:\n str: The path of the closest image file. Returns None if no suitable file is found.\n \"\"\"\n closest_file = None\n closest_time_diff = None\n\n # Iterate over each file in the specified directory\n for filename in os.listdir(directory):\n if filename.lower().endswith(\".jpg\"): # Check if the file is a JPG image\n filepath = os.path.join(directory, filename)\n filetime = os.path.getmtime(filepath) # Get the modification time of the file\n # Check if the file's time is later than the target time and if it's the closest so far\n if filetime > target_time:\n logging.info(f\"File is close: {filepath} - Time: {filetime}\")\n time_diff = filetime - target_time\n if closest_time_diff is None or time_diff < closest_time_diff:\n closest_file = filepath\n closest_time_diff = time_diff\n return closest_file\n\n @staticmethod\n def convert_image_to_base64(filepath):\n \"\"\"\n Converts an image file to a Base64 encoded string.\n\n This function reads the image file from the given filepath, encodes it in Base64,\n and then decodes it to a UTF-8 string, which can be easily used for data transfer \n or embedding in web pages.\n\n Args:\n filepath (str): The path of the image file to be converted.\n\n Returns:\n str: The Base64 encoded string of the image.\n \"\"\"\n with open(filepath, \"rb\") as image_file:\n # Read the file and encode it in Base64\n return base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n @staticmethod\n def clear_orphaned_audio_files():\n \"\"\"\n Removes all audio files in a specific directory.\n\n This method is used to clear out any leftover audio files in the 'tmp/audio' directory. \n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for audio files\n directory_path = 'tmp/audio'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n \n @staticmethod\n def clear_orphaned_video_files():\n \"\"\"\n Removes all video files in a specific directory.\n\n This method is used to clear out any leftover video files in the 'tmp/video' directory. 
\n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for video files\n directory_path = 'tmp/video'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n\n @staticmethod\n def system_file_cleanup():\n \"\"\"\n Performs a general cleanup of system files.\n\n Currently, this method focuses on clearing orphaned audio files but can be expanded to include other cleanup tasks.\n \"\"\"\n # Clear orphaned audio files\n OSHelper.clear_orphaned_audio_files()\n OSHelper.clear_orphaned_video_files()\n \n @staticmethod\n def configure_tmp_directories():\n \"\"\"\n Ensures that the required directories (tmp/audio and tmp/video) exist.\n Creates them if they do not exist.\n \"\"\"\n directories = ['tmp/audio', 'tmp/video']\n for directory in directories:\n os.makedirs(directory, exist_ok=True)\n logging.info(f\"Checked and ensured directory exists: {directory}\")"
},
{
"identifier": "welcome_message",
"path": "utils/text/welcome.py",
"snippet": "def welcome_message():\n print(\"\"\"\n ChatClue: Osiris\n \n /\\_/\\ \n ( o.o ) \n > ^ <\n \n Optimized System for Integrated Real-Time Interaction and Sensing\n \"\"\")"
},
{
"identifier": "ColorFormatter",
"path": "utils/logging/colors.py",
"snippet": "class ColorFormatter(logging.Formatter):\n def format(self, record):\n levelname = record.levelname\n message = logging.Formatter.format(self, record)\n return COLORS.get(levelname, '') + message + COLORS['ENDC']"
}
] | from config import CELERY_CONFIG, LOG_LEVEL, VIDEO_SETTINGS
from utils.os.helpers import OSHelper
from celery import Celery
from celery_config import get_celery_app
from database.setup import DatabaseSetup
from broadcast.broadcaster import broadcaster
from audio.audio_processor import AudioProcessor
from video.video_processor import VideoProcessor
from audio.audio_out import get_audio_out
from utils.os.helpers import OSHelper
from utils.text.welcome import welcome_message
from utils.logging.colors import ColorFormatter
from background.memory.tasks import *
from tools import * # Import all openai tool functions
import logging
import subprocess
import atexit
import sys
import threading
import time
import cv2
import queue | 7,353 |
# Configure basic logging for the application
logging.basicConfig(level=LOG_LEVEL)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s'))
# Ensure the necessary tmp/ directories exist
OSHelper.configure_tmp_directories()
# Configure background processor / subconcious systems
celery_app = get_celery_app()
# Configure audio output
audio_out = get_audio_out()
def start_celery_worker():
"""
Starts a Celery worker as a subprocess.
This method initiates a Celery worker using the subprocess module. The worker runs asynchronously
and executes tasks defined in the Celery application. The worker is configured to log at the
'info' level for better visibility of its operations.
The function also ensures that the Celery worker is terminated gracefully when the Python script exits.
This is achieved using the `atexit` module, which registers a function to terminate the worker
as part of the script's cleanup process.
Returns:
subprocess.Popen: The subprocess object representing the Celery worker.
"""
# Get the log level from configuration, default to 'info'
log_level = CELERY_CONFIG.get('LOCAL_LOG_LEVEL', 'info')
# Start Celery worker
celery_worker = subprocess.Popen(['celery', '-A', 'osiris.celery_app', 'worker', f'--loglevel={log_level}'])
# Register function to terminate worker on exit
atexit.register(lambda: celery_worker.terminate())
return celery_worker
def stop_celery_worker(celery_worker):
"""
Stops the Celery worker gracefully.
Args:
celery_worker (subprocess.Popen): The subprocess object representing the Celery worker.
"""
if celery_worker:
# Send SIGTERM signal to gracefully terminate the worker
celery_worker.terminate()
# Wait for the worker to exit
try:
celery_worker.wait(timeout=0.5) # Adjust the timeout as needed
except subprocess.TimeoutExpired:
# If the worker doesn't terminate within the timeout, kill it
logging.info("Forcibly terminating the Celery worker.")
celery_worker.kill()
def main():
"""
Main function to initialize the application.
Configures celery background worker, database, broadcaster, and audio settings.
"""
welcome_message()
# Optionally start Celery worker
celery_worker = None
if CELERY_CONFIG.get("RUN_LOCALLY_AUTOMATICALLY", True):
logging.info("ROBOT THOUGHT: Starting subconscious systems locally")
celery_worker = start_celery_worker()
logging.info("ROBOT THOUGHT: Subconscious systems activated")
# Setup the database
|
# Configure basic logging for the application
logging.basicConfig(level=LOG_LEVEL)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s'))
# Ensure the necessary tmp/ directories exist
OSHelper.configure_tmp_directories()
# Configure background processor / subconcious systems
celery_app = get_celery_app()
# Configure audio output
audio_out = get_audio_out()
def start_celery_worker():
"""
Starts a Celery worker as a subprocess.
This method initiates a Celery worker using the subprocess module. The worker runs asynchronously
and executes tasks defined in the Celery application. The worker is configured to log at the
'info' level for better visibility of its operations.
The function also ensures that the Celery worker is terminated gracefully when the Python script exits.
This is achieved using the `atexit` module, which registers a function to terminate the worker
as part of the script's cleanup process.
Returns:
subprocess.Popen: The subprocess object representing the Celery worker.
"""
# Get the log level from configuration, default to 'info'
log_level = CELERY_CONFIG.get('LOCAL_LOG_LEVEL', 'info')
# Start Celery worker
celery_worker = subprocess.Popen(['celery', '-A', 'osiris.celery_app', 'worker', f'--loglevel={log_level}'])
# Register function to terminate worker on exit
atexit.register(lambda: celery_worker.terminate())
return celery_worker
def stop_celery_worker(celery_worker):
"""
Stops the Celery worker gracefully.
Args:
celery_worker (subprocess.Popen): The subprocess object representing the Celery worker.
"""
if celery_worker:
# Send SIGTERM signal to gracefully terminate the worker
celery_worker.terminate()
# Wait for the worker to exit
try:
celery_worker.wait(timeout=0.5) # Adjust the timeout as needed
except subprocess.TimeoutExpired:
# If the worker doesn't terminate within the timeout, kill it
logging.info("Forcibly terminating the Celery worker.")
celery_worker.kill()
def main():
"""
Main function to initialize the application.
Configures celery background worker, database, broadcaster, and audio settings.
"""
welcome_message()
# Optionally start Celery worker
celery_worker = None
if CELERY_CONFIG.get("RUN_LOCALLY_AUTOMATICALLY", True):
logging.info("ROBOT THOUGHT: Starting subconscious systems locally")
celery_worker = start_celery_worker()
logging.info("ROBOT THOUGHT: Subconscious systems activated")
# Setup the database | DatabaseSetup.initial_setup() | 2 | 2023-12-06 09:10:06+00:00 | 12k |
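[Editor's illustration] The row above targets the body of main() in osiris.py, and its recorded gold next line is `DatabaseSetup.initial_setup()`. As a rough, hedged sketch of where that call sits, the fragment below shows how main() could continue using only the names listed in this row's context and imports (DatabaseSetup, broadcaster, AudioProcessor, VideoProcessor, OSHelper, threading, stop_celery_worker). The thread setup and ordering are assumptions for illustration, not the repository's exact code.

# Sketch only: a plausible continuation of main() after the Celery worker is started.
DatabaseSetup.initial_setup()        # gold next line: create tables and enable pgvector

broadcaster.start()                  # Broadcaster exposes start()/shutdown() per the snippet

audio_processor = AudioProcessor()   # speech recognition + OpenAI responses
video_processor = VideoProcessor()   # optional frame capture (per VIDEO_SETTINGS)

audio_thread = threading.Thread(target=audio_processor.process_stream)
video_thread = threading.Thread(target=video_processor.process_stream)
audio_thread.start()
video_thread.start()

try:
    audio_thread.join()
    video_thread.join()
finally:
    # Shut everything down and clean up temporary files on exit.
    audio_processor.shutdown()
    video_processor.shutdown()
    stop_celery_worker(celery_worker)
    OSHelper.system_file_cleanup()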
lumina-test/lumina | lumina/e2e_test/test_cnp.py | [
{
"identifier": "get_qp_info_list",
"path": "lumina/analyzer/main.py",
"snippet": "LOG_FILENAME = \"analysis.log\"\nRESULT_FILENAME = \"result.out\"\ndef get_qp_info_list(switch_msg_snapshot):\ndef main(args):\ndef parse_args():"
},
{
"identifier": "Orchestrator",
"path": "lumina/orchestrator/main.py",
"snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = 
self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False"
},
{
"identifier": "SwitchCounter",
"path": "lumina/analyzer/counter/switch_counter.py",
"snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter"
},
{
"identifier": "MLNXHostCounter",
"path": "lumina/analyzer/counter/host_counter.py",
"snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx"
},
{
"identifier": "IntelHostCounter",
"path": "lumina/analyzer/counter/host_counter.py",
"snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict"
},
{
"identifier": "get_packet_list",
"path": "lumina/analyzer/pcap_processor/pcap_process.py",
"snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list"
},
{
"identifier": "config_stream_handler",
"path": "lumina/utils/config_loggers.py",
"snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)"
},
{
"identifier": "config_file_handler",
"path": "lumina/utils/config_loggers.py",
"snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)"
}
] | import argparse, os, glob, logging, time
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.cnp_check as cnp_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list, get_packet_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.utils.config_loggers import config_stream_handler, config_file_handler | 9,750 | def verify_results(orchestrator, rdma_verb=None, qp_index_list=None):
""" Verify experiment results
Args:
orchestrator (Orchestrator object): Orchestrator object that contains all the configurations
rdma_verb (str): RDMA verb to verify (default: None)
qp_index_list (list): List of QP indices to verify (default: None)
Returns:
N/A
"""
result_dir = orchestrator.result_path
num_repeats = orchestrator.num_repeats
aggregate_pcap_filename = orchestrator.aggregate_pcap_filename
if rdma_verb == None:
rdma_verb = orchestrator.traffic_conf['rdma-verb'].lower().strip()
if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:
logging.error("Invalid RDMA verb: %s" % rdma_verb)
return
## A mix of RDMA SEND and READ. Need to verify both SEND and READ
if rdma_verb == 'send_read':
num_qps_send, num_qps_read = [int(x) for x in orchestrator.traffic_conf['num-qps'].split(',')]
verify_results(orchestrator=orchestrator,
rdma_verb='send',
qp_index_list=list(range(num_qps_send)))
verify_results(orchestrator=orchestrator,
rdma_verb='read',
qp_index_list=list(range(num_qps_send, num_qps_send+num_qps_read)))
return
elif rdma_verb == "read":
receiver_nic_type = orchestrator.requester.conf['nic']['type']
if orchestrator.requester.is_intel_nic():
receiver_np_enable = orchestrator.requester.conf['roce-parameters']['dcqcn-enable']
receiver_slow_restart = False
min_time_between_cnps_us = 0
elif orchestrator.requester.is_mlnx_nic():
receiver_np_enable = orchestrator.requester.conf['roce-parameters']['dcqcn-np-enable']
receiver_slow_restart = orchestrator.requester.conf['roce-parameters']['slow-restart']
min_time_between_cnps_us = orchestrator.requester.conf['roce-parameters']['min-time-between-cnps']
else:
receiver_np_enable = False
receiver_slow_restart = False
min_time_between_cnps_us = 0
else:
receiver_nic_type = orchestrator.responder.conf['nic']['type']
if orchestrator.responder.is_intel_nic():
receiver_np_enable = orchestrator.responder.conf['roce-parameters']['dcqcn-enable']
receiver_slow_restart = False
min_time_between_cnps_us = 0
elif orchestrator.responder.is_mlnx_nic():
receiver_np_enable = orchestrator.responder.conf['roce-parameters']['dcqcn-np-enable']
receiver_slow_restart = orchestrator.responder.conf['roce-parameters']['slow-restart']
min_time_between_cnps_us = orchestrator.responder.conf['roce-parameters']['min-time-between-cnps']
else:
receiver_np_enable = False
receiver_slow_restart = False
min_time_between_cnps_us = 0
nack_trigger_cnp = cnp_check.check_nack_trigger_cnp(receiver_nic_type,
receiver_np_enable,
receiver_slow_restart)
port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'],
'responder': orchestrator.responder.conf['nic']['switch-port'],
'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'],
'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']}
requester_ip_list = orchestrator.get_requester_ip_list()
responder_ip_list = orchestrator.get_responder_ip_list()
for iter in range(num_repeats):
iter = str(iter)
result_logger = logging.getLogger('Iter %s Verb %s' % (iter, rdma_verb))
result_logger.handlers.clear()
config_file_handler(logger=result_logger,
log_file=os.path.join(result_dir, iter, RESULT_FILENAME),
no_format=True)
result_logger.info("=" * 100)
result_logger.info("Iteration %s Verb %s" % (iter, rdma_verb))
switch_msg_snapshot = os.path.join(result_dir,
iter,
switch.SWITCH_RESULT_DIR,
switch.SWITCH_MESSAGE_SNAPSHOT)
switch_state_snapshot = os.path.join(result_dir,
iter,
switch.SWITCH_RESULT_DIR,
switch.SWITCH_STATE_SNAPSHOT)
pcap_filename = os.path.join(result_dir,
iter,
host.PCAP_RESULT_DIR,
aggregate_pcap_filename)
requester_counter_start = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.REQ_START_COUNTER_FILE_NAME)
requester_counter_finish = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.REQ_FINISH_COUNTER_FILE_NAME)
responder_counter_start = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.RSP_START_COUNTER_FILE_NAME)
responder_counter_finish = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.RSP_FINISH_COUNTER_FILE_NAME)
switch_counter = SwitchCounter(switch_state_snapshot, port_map)
if orchestrator.requester.is_mlnx_nic():
requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish)
elif orchestrator.requester.is_intel_nic():
|
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_cnp.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3
def setup_root_logger(orchestrator):
""" Setup the root logger
Args:
orchestrator (Orchestrator object): Orchestrator object that contains all the configurations
"""
root_logger = logging.getLogger()
root_logger.handlers.clear()
config_stream_handler(root_logger)
config_file_handler(logger=root_logger,
log_file=os.path.join(orchestrator.result_path, LOG_FILENAME),
no_format=False)
def run_traffic(orchestrator):
""" Run the traffic and collect results
Args:
orchestrator (Orchestrator object): Orchestrator object that contains all the configurations
Returns:
bool: True if successful, False otherwise
"""
orchestrator.rm_old_files()
if orchestrator.sync_and_compile() == False:
logging.error("Failed to sync and compile the code")
sys.exit(-1)
logging.info("Sync and compile completed")
if orchestrator.generate_switch_config_file() == False:
logging.error("Failed to generate switch configuration file")
sys.exit(-1)
num_repeats = orchestrator.get_num_repeats()
for i in range(num_repeats):
logging.info("=" * 100)
nb_retry = 0
iter_result = False
while nb_retry < MAX_NB_EXP_RETRIES:
if orchestrator.run_experiment() == False:
logging.error("Iteration %d: Failed to complete experiment" % i)
logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry)
nb_retry += 1
orchestrator.clean_up()
time.sleep(5)
continue
logging.info("Iteration %d: Completed experiment" % i)
try:
orchestrator.clean_up()
orchestrator.fetch_results(i)
logging.info("Iteration %d: Fetch experiment results" % i)
orchestrator.merge_traces(i)
logging.info("Iteration %d: Merge the pcap files" % i)
except:
logging.error("Iteration %d: Result collection failed" % (i))
logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
nb_retry += 1
time.sleep(5)
continue
if orchestrator.check_integrity(i) == False:
logging.error("Iteration %d: Integrity check failed" % (i))
logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
nb_retry += 1
time.sleep(5)
continue
iter_result = True
break
if iter_result is False:
logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry))
return False
return True
def verify_results(orchestrator, rdma_verb=None, qp_index_list=None):
""" Verify experiment results
Args:
orchestrator (Orchestrator object): Orchestrator object that contains all the configurations
rdma_verb (str): RDMA verb to verify (default: None)
qp_index_list (list): List of QP indices to verify (default: None)
Returns:
N/A
"""
result_dir = orchestrator.result_path
num_repeats = orchestrator.num_repeats
aggregate_pcap_filename = orchestrator.aggregate_pcap_filename
if rdma_verb == None:
rdma_verb = orchestrator.traffic_conf['rdma-verb'].lower().strip()
if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:
logging.error("Invalid RDMA verb: %s" % rdma_verb)
return
## A mix of RDMA SEND and READ. Need to verify both SEND and READ
if rdma_verb == 'send_read':
num_qps_send, num_qps_read = [int(x) for x in orchestrator.traffic_conf['num-qps'].split(',')]
verify_results(orchestrator=orchestrator,
rdma_verb='send',
qp_index_list=list(range(num_qps_send)))
verify_results(orchestrator=orchestrator,
rdma_verb='read',
qp_index_list=list(range(num_qps_send, num_qps_send+num_qps_read)))
return
elif rdma_verb == "read":
receiver_nic_type = orchestrator.requester.conf['nic']['type']
if orchestrator.requester.is_intel_nic():
receiver_np_enable = orchestrator.requester.conf['roce-parameters']['dcqcn-enable']
receiver_slow_restart = False
min_time_between_cnps_us = 0
elif orchestrator.requester.is_mlnx_nic():
receiver_np_enable = orchestrator.requester.conf['roce-parameters']['dcqcn-np-enable']
receiver_slow_restart = orchestrator.requester.conf['roce-parameters']['slow-restart']
min_time_between_cnps_us = orchestrator.requester.conf['roce-parameters']['min-time-between-cnps']
else:
receiver_np_enable = False
receiver_slow_restart = False
min_time_between_cnps_us = 0
else:
receiver_nic_type = orchestrator.responder.conf['nic']['type']
if orchestrator.responder.is_intel_nic():
receiver_np_enable = orchestrator.responder.conf['roce-parameters']['dcqcn-enable']
receiver_slow_restart = False
min_time_between_cnps_us = 0
elif orchestrator.responder.is_mlnx_nic():
receiver_np_enable = orchestrator.responder.conf['roce-parameters']['dcqcn-np-enable']
receiver_slow_restart = orchestrator.responder.conf['roce-parameters']['slow-restart']
min_time_between_cnps_us = orchestrator.responder.conf['roce-parameters']['min-time-between-cnps']
else:
receiver_np_enable = False
receiver_slow_restart = False
min_time_between_cnps_us = 0
nack_trigger_cnp = cnp_check.check_nack_trigger_cnp(receiver_nic_type,
receiver_np_enable,
receiver_slow_restart)
port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'],
'responder': orchestrator.responder.conf['nic']['switch-port'],
'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'],
'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']}
requester_ip_list = orchestrator.get_requester_ip_list()
responder_ip_list = orchestrator.get_responder_ip_list()
for iter in range(num_repeats):
iter = str(iter)
result_logger = logging.getLogger('Iter %s Verb %s' % (iter, rdma_verb))
result_logger.handlers.clear()
config_file_handler(logger=result_logger,
log_file=os.path.join(result_dir, iter, RESULT_FILENAME),
no_format=True)
result_logger.info("=" * 100)
result_logger.info("Iteration %s Verb %s" % (iter, rdma_verb))
switch_msg_snapshot = os.path.join(result_dir,
iter,
switch.SWITCH_RESULT_DIR,
switch.SWITCH_MESSAGE_SNAPSHOT)
switch_state_snapshot = os.path.join(result_dir,
iter,
switch.SWITCH_RESULT_DIR,
switch.SWITCH_STATE_SNAPSHOT)
pcap_filename = os.path.join(result_dir,
iter,
host.PCAP_RESULT_DIR,
aggregate_pcap_filename)
requester_counter_start = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.REQ_START_COUNTER_FILE_NAME)
requester_counter_finish = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.REQ_FINISH_COUNTER_FILE_NAME)
responder_counter_start = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.RSP_START_COUNTER_FILE_NAME)
responder_counter_finish = os.path.join(result_dir,
iter,
host.RDMA_RESULT_DIR,
host.RSP_FINISH_COUNTER_FILE_NAME)
switch_counter = SwitchCounter(switch_state_snapshot, port_map)
if orchestrator.requester.is_mlnx_nic():
requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish)
elif orchestrator.requester.is_intel_nic(): | requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) | 4 | 2023-12-09 08:21:14+00:00 | 12k |
Tlntin/booking_simulator | apps/agentfabric/user_core.py | [
{
"identifier": "parse_configuration",
"path": "config_utils.py",
"snippet": "def parse_configuration(uuid_str=''):\n \"\"\"parse configuration\n\n Args:\n\n Returns:\n dict: parsed configuration\n\n \"\"\"\n model_cfg_file = os.getenv('MODEL_CONFIG_FILE', DEFAULT_MODEL_CONFIG_FILE)\n\n builder_cfg_file = get_user_cfg_file(uuid_str)\n # use default if not exists\n if not os.path.exists(builder_cfg_file):\n # create parents directory\n os.makedirs(os.path.dirname(builder_cfg_file), exist_ok=True)\n # copy the template to the address\n builder_cfg_file_temp = './config/builder_config.json'\n\n if builder_cfg_file_temp != builder_cfg_file:\n shutil.copy(builder_cfg_file_temp, builder_cfg_file)\n\n tool_cfg_file = os.getenv('TOOL_CONFIG_FILE', DEFAULT_TOOL_CONFIG_FILE)\n\n builder_cfg = Config.from_file(builder_cfg_file)\n model_cfg = Config.from_file(model_cfg_file)\n tool_cfg = Config.from_file(tool_cfg_file)\n\n tools_info = builder_cfg.tools\n available_tool_list = []\n for key, value in tools_info.items():\n if value['use']:\n available_tool_list.append(key)\n tool_cfg[key]['use'] = value['use']\n\n openapi_plugin_file = get_user_openapi_plugin_cfg_file(uuid_str)\n plugin_cfg = {}\n available_plugin_list = []\n if os.path.exists(openapi_plugin_file):\n openapi_plugin_cfg = Config.from_file(openapi_plugin_file)\n try:\n config_dict = openapi_schema_convert(\n schema=openapi_plugin_cfg.schema,\n auth=openapi_plugin_cfg.auth.to_dict())\n plugin_cfg = Config(config_dict)\n for name, config in config_dict.items():\n available_plugin_list.append(name)\n except Exception as e:\n error = traceback.format_exc()\n print(f'Error:{e}, with detail: {error}')\n print(\n 'Error:FormatError, with detail: The format of the plugin config file is incorrect.'\n )\n\n return builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list"
},
{
"identifier": "DEFAULT_EXEC_TEMPLATE",
"path": "custom_prompt.py",
"snippet": "DEFAULT_EXEC_TEMPLATE = \"\"\"Observation: <result><exec_result></result>\\nAnswer:\"\"\""
},
{
"identifier": "DEFAULT_SYSTEM_TEMPLATE",
"path": "custom_prompt.py",
"snippet": "DEFAULT_SYSTEM_TEMPLATE = \"\"\"\n\n# 工具\n\n## 你拥有如下工具:\n\n<tool_list>\n\n## 当你需要调用工具时,请在你的回复中穿插如下的工具调用命令,可以根据需求调用零次或多次:\n## 即将调用工具时,只说即将调用[工具名],不要说其他多余的话。\n## 你只能从用户输入中获取关键信息,不能自己生成,所有获得的关键信息必须要参考用户输入作为依据。\n\n工具调用\nAction: 工具的名称,必须是<tool_name_list>之一\nAction Input: 工具的输入\nObservation: <result>工具返回的结果</result>\nAnswer: 根据Observation总结本次工具调用返回的结果,如果结果中出现url,请不要展示出。\n\n```\n[链接](url)\n```\n\n# 指令\n\"\"\""
},
{
"identifier": "DEFAULT_USER_TEMPLATE",
"path": "custom_prompt.py",
"snippet": "DEFAULT_USER_TEMPLATE = \"\"\"(你正在扮演<role_name>,你可以使用工具:<tool_name_list><knowledge_note>)<file_names><user_input>\"\"\""
},
{
"identifier": "CustomPromptGenerator",
"path": "custom_prompt.py",
"snippet": "class CustomPromptGenerator(PromptGenerator):\n\n def __init__(self,\n system_template=DEFAULT_SYSTEM_TEMPLATE,\n instruction_template=DEFAULT_INSTRUCTION_TEMPLATE,\n user_template=DEFAULT_USER_TEMPLATE,\n exec_template=DEFAULT_EXEC_TEMPLATE,\n assistant_template='',\n sep='\\n\\n',\n llm=None,\n length_constraint=LengthConstraint(),\n **kwargs):\n super().__init__(\n system_template=system_template,\n instruction_template=instruction_template,\n user_template=user_template,\n exec_template=exec_template,\n assistant_template=assistant_template,\n sep=sep,\n llm=llm,\n length_constraint=length_constraint)\n # hack here for special prompt, such as add an addition round before user input\n self.add_addition_round = kwargs.get('add_addition_round', False)\n self.addition_assistant_reply = kwargs.get('addition_assistant_reply',\n '')\n builder_cfg_file = get_user_cfg_file(\n uuid_str=kwargs.get('uuid_str', ''))\n builder_cfg = Config.from_file(builder_cfg_file)\n self.builder_cfg = builder_cfg\n self.knowledge_file_name = kwargs.get('knowledge_file_name', '')\n\n self.llm = llm\n self.prompt_preprocessor = build_raw_prompt(llm.model_id)\n self.length_constraint = length_constraint\n self._parse_length_restriction()\n\n def _parse_length_restriction(self):\n constraint = self.llm.cfg.get('length_constraint', None)\n # if isinstance(constraint, Config):\n # constraint = constraint.to_dict()\n self.length_constraint.update(constraint)\n\n def _update_user_prompt_without_knowledge(self, task, tool_list, **kwargs):\n if len(tool_list) > 0:\n # user input\n user_input = self.user_template.replace('<role_name>',\n self.builder_cfg.name)\n user_input = user_input.replace(\n '<tool_name_list>',\n ','.join([tool.name for tool in tool_list]))\n else:\n self.user_template = DEFAULT_USER_TEMPLATE_WITHOUT_TOOL\n user_input = self.user_template.replace('<user_input>', task)\n user_input = user_input.replace('<role_name>',\n self.builder_cfg.name)\n\n user_input = user_input.replace('<user_input>', task)\n\n if 'append_files' in kwargs:\n append_files = kwargs.get('append_files', [])\n if len(append_files) > 0:\n file_names = ','.join(\n [os.path.basename(path) for path in append_files])\n user_input = user_input.replace('<file_names>',\n f'[上传文件{file_names}]')\n else:\n user_input = user_input.replace('<file_names>', '')\n else:\n user_input = user_input.replace('<file_names>', '')\n\n return user_input\n\n def init_prompt(self, task, tool_list, knowledge_list, **kwargs):\n\n if len(self.history) == 0:\n\n self.history.append({\n 'role': 'system',\n 'content': 'You are a helpful assistant.'\n })\n\n if len(tool_list) > 0:\n prompt = f'{self.system_template}\\n{self.instruction_template}'\n\n # get tool description str\n tool_str = self.get_tool_str(tool_list)\n prompt = prompt.replace('<tool_list>', tool_str)\n\n tool_name_str = self.get_tool_name_str(tool_list)\n prompt = prompt.replace('<tool_name_list>', tool_name_str)\n else:\n self.system_template = DEFAULT_SYSTEM_TEMPLATE_WITHOUT_TOOL\n prompt = f'{self.system_template}\\n{self.instruction_template}'\n\n user_input = self._update_user_prompt_without_knowledge(\n task, tool_list, **kwargs)\n\n if len(knowledge_list) > 0:\n user_input = user_input.replace('<knowledge_note>',\n ',请查看前面的知识库')\n else:\n user_input = user_input.replace('<knowledge_note>', '')\n\n self.system_prompt = copy.deepcopy(prompt)\n\n # build history\n if self.add_addition_round:\n self.history.append({\n 'role': 'user',\n 'content': self.system_prompt\n })\n 
self.history.append({\n 'role': 'assistant',\n 'content': self.addition_assistant_reply\n })\n self.history.append({'role': 'user', 'content': user_input})\n self.history.append({\n 'role': 'assistant',\n 'content': self.assistant_template\n })\n else:\n self.history.append({\n 'role': 'user',\n 'content': self.system_prompt + user_input\n })\n self.history.append({\n 'role': 'assistant',\n 'content': self.assistant_template\n })\n\n self.function_calls = self.get_function_list(tool_list)\n else:\n user_input = self._update_user_prompt_without_knowledge(\n task, tool_list, **kwargs)\n if len(knowledge_list) > 0:\n user_input = user_input.replace('<knowledge_note>',\n ',请查看前面的知识库')\n else:\n user_input = user_input.replace('<knowledge_note>', '')\n\n self.history.append({'role': 'user', 'content': user_input})\n self.history.append({\n 'role': 'assistant',\n 'content': self.assistant_template\n })\n\n if len(knowledge_list) > 0:\n knowledge_str = self.get_knowledge_str(\n knowledge_list,\n file_name=self.knowledge_file_name,\n only_content=True)\n self.update_knowledge_str(knowledge_str)\n\n def update_knowledge_str(self, knowledge_str):\n \"\"\"If knowledge base information was not used previously, it will be added;\n if knowledge base information was previously used, it will be replaced.\n\n Args:\n knowledge_str (str): knowledge str generated by get_knowledge_str\n \"\"\"\n knowledge_introduction = KNOWLEDGE_INTRODUCTION_PROMPT.replace(\n '<file_name>', self.knowledge_file_name)\n if len(knowledge_str) > self.length_constraint.knowledge:\n # todo: use tokenizer to constrain length\n knowledge_str = knowledge_str[-self.length_constraint.knowledge:]\n knowledge_str = f'{KNOWLEDGE_PROMPT}{self.sep}{knowledge_introduction}{self.sep}{knowledge_str}'\n\n for i in range(0, len(self.history)):\n if self.history[i]['role'] == 'user':\n content: str = self.history[i]['content']\n start_pos = content.find(f'{KNOWLEDGE_PROMPT}{self.sep}')\n end_pos = content.rfind('\\n\\n# 工具\\n\\n')\n if start_pos >= 0 and end_pos >= 0: # replace knowledge\n\n self.history[i]['content'] = content[\n 0:start_pos] + knowledge_str + content[end_pos:]\n break\n elif start_pos < 0 and end_pos == 0: # add knowledge\n self.history[i]['content'] = knowledge_str + content\n break\n else:\n continue\n\n def get_tool_str(self, tool_list):\n tool_texts = []\n for tool in tool_list:\n tool_texts.append(\n TOOL_DESC.format(\n name_for_model=tool.name,\n name_for_human=tool.name,\n description_for_model=tool.description,\n parameters=json.dumps(tool.parameters,\n ensure_ascii=False)))\n # + ' ' + FORMAT_DESC['json'])\n tool_str = '\\n\\n'.join(tool_texts)\n return tool_str\n\n def get_tool_name_str(self, tool_list):\n tool_name = []\n for tool in tool_list:\n tool_name.append(tool.name)\n\n tool_name_str = json.dumps(tool_name, ensure_ascii=False)\n return tool_name_str\n\n def _generate(self, llm_result, exec_result: str):\n \"\"\"\n generate next round prompt based on previous llm_result and exec_result and update history\n \"\"\"\n if len(llm_result) != 0:\n self.history[-1]['content'] += f'{llm_result}'\n if len(exec_result) != 0:\n # handle image markdown wrapper\n image_markdown_re = re.compile(\n pattern=r'!\\[IMAGEGEN\\]\\(([\\s\\S]+)\\)')\n match = image_markdown_re.search(exec_result)\n if match is not None:\n exec_result = match.group(1).rstrip()\n exec_result = self.exec_template.replace('<exec_result>',\n str(exec_result))\n self.history[-1]['content'] += exec_result\n\n # generate plate prompt here\n self.prompt = 
self.prompt_preprocessor(self.history)\n return self.prompt"
},
{
"identifier": "parse_role_config",
"path": "custom_prompt.py",
"snippet": "def parse_role_config(config: dict):\n prompt = '你扮演AI-Agent,'\n\n # concat prompt\n if 'name' in config and config['name']:\n prompt += ('你的名字是' + config['name'] + '。')\n if 'description' in config and config['description']:\n prompt += config['description']\n prompt += '\\n你具有下列具体功能:'\n if 'instruction' in config and config['instruction']:\n if isinstance(config['instruction'], list):\n for ins in config['instruction']:\n prompt += ins\n prompt += ';'\n elif isinstance(config['instruction'], str):\n prompt += config['instruction']\n if prompt[-1] == ';':\n prompt = prompt[:-1]\n prompt += '\\n下面你将开始扮演'\n eastern_eight_zone = pytz.timezone('Asia/Shanghai')\n\n # 获取东八区的当前时间\n eastern_time = datetime.now(eastern_eight_zone)\n # 格式化时间\n formatted_time = eastern_time.strftime(\"%Y-%m-%d %H:%M\")\n formatted_weekday = eastern_time.weekday()\n temp_list = [\"一\", \"二\", \"三\", \"四\", \"五\", \"六\", \"日\"]\n formatted_weekday = temp_list[formatted_weekday]\n prompt += f\"\\n当前时间是:{formatted_time},星期{formatted_weekday}。\"\n prompt += \"你的数学很强,计算相对日期对你来说轻而易举。\"\n if 'name' in config and config['name']:\n prompt += config['name']\n prompt += ',明白了请说“好的。”,不要说其他的。'\n return prompt"
},
{
"identifier": "AgentExecutor",
"path": "modelscope_agent/agent.py",
"snippet": "class AgentExecutor:\n\n def __init__(self,\n llm: LLM,\n tool_cfg: Optional[Dict] = {},\n agent_type: AgentType = AgentType.DEFAULT,\n additional_tool_list: Optional[Dict] = {},\n prompt_generator: Optional[PromptGenerator] = None,\n output_parser: Optional[OutputParser] = None,\n tool_retrieval: Optional[Union[bool, ToolRetrieval]] = True,\n knowledge_retrieval: Optional[KnowledgeRetrieval] = None):\n \"\"\"\n the core class of ms agent. It is responsible for the interaction between user, llm and tools,\n and return the execution result to user.\n\n Args:\n llm (LLM): llm model, can be load from local or a remote server.\n tool_cfg (Optional[Dict]): cfg of default tools\n agent_type (AgentType, optional): agent type. Defaults to AgentType.DEFAULT, decide which type of agent\n reasoning type to use\n additional_tool_list (Optional[Dict], optional): user-defined additional tool list. Defaults to {}.\n prompt_generator (Optional[PromptGenerator], optional): this module is responsible for generating prompt\n according to interaction result. Defaults to use MSPromptGenerator.\n output_parser (Optional[OutputParser], optional): this module is responsible for parsing output of llm\n to executable actions. Defaults to use MsOutputParser.\n tool_retrieval (Optional[Union[bool, ToolRetrieval]], optional): Retrieve related tools by input task,\n since most of the tools may be useless for LLM in specific task.\n If it is bool type and is True, will use default tool_retrieval. Defaults to True.\n knowledge_retrieval (Optional[KnowledgeRetrieval], optional): If user want to use extra knowledge,\n this component can be used to retrieve related knowledge. Defaults to None.\n \"\"\"\n\n self.llm = llm\n\n self.agent_type = agent_type\n self.llm.set_agent_type(agent_type)\n self.prompt_generator = prompt_generator or get_prompt_generator(\n agent_type)\n self.output_parser = output_parser or get_output_parser(agent_type)\n\n self._init_tools(tool_cfg, additional_tool_list)\n\n if isinstance(tool_retrieval, bool) and tool_retrieval:\n tool_retrieval = ToolRetrieval()\n self.tool_retrieval = tool_retrieval\n if self.tool_retrieval:\n self.tool_retrieval.construct(\n [str(t) for t in self.tool_list.values()])\n self.knowledge_retrieval = knowledge_retrieval\n self.reset()\n self.seed = None\n\n def _init_tools(self,\n tool_cfg: Dict = {},\n additional_tool_list: Dict = {}):\n \"\"\"init tool list of agent. We provide a default tool list, which is initialized by a cfg file.\n user can also provide user-defined tools by additional_tool_list.\n The key of additional_tool_list is tool name, and the value is corresponding object.\n\n Args:\n tool_cfg (Dict): default tool cfg.\n additional_tool_list (Dict, optional): user-defined tools. 
Defaults to {}.\n \"\"\"\n self.tool_list = {}\n tool_info_list = {**TOOL_INFO_LIST, **additional_tool_list}\n tools_module = importlib.import_module('modelscope_agent.tools')\n for tool_name in tool_cfg.keys():\n if tool_cfg[tool_name].get('use', False):\n assert tool_name in tool_info_list, f'Invalid tool name: {tool_name}, ' \\\n f'available ones are: {tool_info_list.keys()}'\n tool_class_name = tool_info_list[tool_name]\n tool_class = getattr(tools_module, tool_class_name)\n tool_name = tool_class.name\n self.tool_list[tool_name] = tool_class(tool_cfg)\n\n self.tool_list = {**self.tool_list, **additional_tool_list}\n # self.available_tool_list = deepcopy(self.tool_list)\n self.set_available_tools(self.tool_list.keys())\n\n def set_available_tools(self, available_tool_list):\n # TODO @wenmeng.zwm refine tool init\n for t in available_tool_list:\n if t not in self.tool_list:\n raise ValueError(\n f'Unsupported tools found:{t}, please check, valid ones: {self.tool_list.keys()}'\n )\n\n self.available_tool_list = {\n k: self.tool_list[k]\n for k in available_tool_list\n }\n\n def retrieve_tools(self, query: str) -> List[str]:\n \"\"\"retrieve tools given query\n\n Args:\n query (str): query\n\n \"\"\"\n if self.tool_retrieval:\n retrieve_tools = self.tool_retrieval.retrieve(query)\n self.set_available_tools(available_tool_list=retrieve_tools.keys())\n return self.available_tool_list.values()\n\n def get_knowledge(self, query: str) -> List[str]:\n \"\"\"retrieve knowledge given query\n\n Args:\n query (str): query\n\n \"\"\"\n return self.knowledge_retrieval.retrieve(\n query) if self.knowledge_retrieval else []\n\n def run(self,\n task: str,\n remote: bool = False,\n print_info: bool = False,\n append_files: list = []) -> List[Dict]:\n \"\"\" use llm and tools to execute task given by user\n\n Args:\n task (str): concrete task\n remote (bool, optional): whether to execute tool in remote mode. Defaults to False.\n print_info (bool, optional): whether to print prompt info. Defaults to False.\n\n Returns:\n List[Dict]: execute result. One task may need to interact with llm multiple times,\n so a list of dict is returned. 
Each dict contains the result of one interaction.\n \"\"\"\n\n # retrieve tools\n tool_list = self.retrieve_tools(task)\n knowledge_list = self.get_knowledge(task)\n\n self.prompt_generator.init_prompt(\n task, tool_list, knowledge_list, append_files=append_files)\n function_list = self.prompt_generator.get_function_list(tool_list)\n\n llm_result, exec_result = '', ''\n\n idx = 0\n final_res = []\n\n while True:\n idx += 1\n\n # generate prompt and call llm\n llm_artifacts = self.prompt_generator.generate(\n llm_result, exec_result)\n try:\n llm_result = self.llm.generate(llm_artifacts, function_list)\n except RuntimeError as e:\n return [{'exec_result': str(e)}]\n\n if print_info:\n print(f'|LLM inputs in round {idx}: {llm_artifacts}')\n\n # parse and get tool name and arguments\n try:\n action, action_args = self.output_parser.parse_response(\n llm_result)\n except ValueError as e:\n return [{'exec_result': f'{e}'}]\n\n if action is None:\n # in chat mode, the final result of last instructions should be updated to prompt history\n _ = self.prompt_generator.generate(llm_result, '')\n\n # for summarize\n display(llm_result, {}, idx, self.agent_type)\n return final_res\n\n if action in self.available_tool_list:\n action_args = self.parse_action_args(action_args)\n tool = self.tool_list[action]\n\n # TODO @wenmeng.zwm remove this hack logic for image generation\n if action == 'image_gen' and self.seed:\n action_args['seed'] = self.seed\n try:\n exec_result = tool(**action_args, remote=remote)\n if print_info:\n print(f'|exec_result: {exec_result}')\n\n # parse exec result and store result to agent state\n final_res.append(exec_result)\n self.parse_exec_result(exec_result)\n except Exception as e:\n exec_result = f'Action call error: {action}: {action_args}. \\n Error message: {e}'\n return [{'exec_result': exec_result}]\n else:\n exec_result = f\"Unknown action: '{action}'. \"\n return [{'exec_result': exec_result}]\n\n # display result\n display(llm_result, exec_result, idx, self.agent_type)\n\n def stream_run(self,\n uuid_str: str,\n task: str,\n remote: bool = True,\n print_info: bool = False,\n append_files: list = []) -> Dict:\n \"\"\"this is a stream version of run, which can be used in scenario like gradio.\n It will yield the result of each interaction, so that the caller can display the result\n\n Args:\n uuid_str: str,\n task (str): concrete task\n remote (bool, optional): whether to execute tool in remote mode. Defaults to True.\n print_info (bool, optional): whether to print prompt info. 
Defaults to False.\n files that individually used in each run, no need to record to global state\n\n Yields:\n Iterator[Dict]: iterator of llm response and tool execution result\n \"\"\"\n\n # retrieve tools\n tool_list = self.retrieve_tools(task)\n knowledge_list = self.get_knowledge(task)\n\n self.prompt_generator.init_prompt(\n task,\n tool_list,\n knowledge_list,\n append_files=append_files,\n )\n function_list = self.prompt_generator.get_function_list(tool_list)\n\n llm_result, exec_result = '', ''\n\n idx = 0\n\n while True:\n idx += 1\n llm_artifacts = self.prompt_generator.generate(\n llm_result, exec_result)\n if print_info:\n print(f'|LLM inputs in round {idx}:\\n{llm_artifacts}')\n\n llm_result = ''\n try:\n for s in self.llm.stream_generate(llm_artifacts,\n function_list):\n llm_result += s\n yield {'llm_text': s}\n except RuntimeError:\n s = self.llm.generate(llm_artifacts)\n llm_result += s\n yield {'llm_text': s}\n except Exception as e:\n yield {'llm_text': str(e)}\n\n # parse and get tool name and arguments\n try:\n action, action_args = self.output_parser.parse_response(\n llm_result)\n except ValueError as e:\n yield {'exec_result': f'{e}'}\n return\n\n if action is None:\n # in chat mode, the final result of last instructions should be updated to prompt history\n _ = self.prompt_generator.generate(llm_result, '')\n yield {'is_final': True}\n return\n\n if action in self.available_tool_list:\n # yield observation to as end of action input symbol asap\n yield {'llm_text': 'Observation: '}\n action_args = self.parse_action_args(action_args)\n tool = self.tool_list[action]\n\n action_args[\"uuid_str\"] = uuid_str\n # TODO @wenmeng.zwm remove this hack logic for image generation\n if action == 'image_gen' and self.seed:\n action_args['seed'] = self.seed\n try:\n exec_result = tool(**action_args, remote=remote)\n yield {'exec_result': exec_result}\n\n # parse exec result and update state\n self.parse_exec_result(exec_result)\n except Exception as e:\n exec_result = f'Action call error: {action}: {action_args}. \\n Error message: {e}'\n yield {'exec_result': exec_result}\n self.prompt_generator.reset()\n return\n else:\n exec_result = f\"Unknown action: '{action}'. \"\n yield {'exec_result': exec_result}\n self.prompt_generator.reset()\n return\n\n def reset(self):\n \"\"\"\n clear history and agent state\n \"\"\"\n self.prompt_generator.reset()\n self.agent_state = {}\n\n def parse_action_args(self, action_args):\n \"\"\"\n replace action_args in str to Image/Video/Audio Wrapper, so that tool can handle them\n \"\"\"\n parsed_action_args = {}\n for name, arg in action_args.items():\n try:\n true_arg = self.agent_state.get(arg, arg)\n except Exception as e:\n print(f'Error when parsing action args: {e}, using fall back')\n true_arg = arg\n parsed_action_args[name] = true_arg\n return parsed_action_args\n\n def parse_exec_result(self, exec_result, *args, **kwargs):\n \"\"\"\n update exec result to agent state.\n key is the str representation of the result.\n \"\"\"\n for k, v in exec_result.items():\n self.agent_state[str(v)] = v"
},
{
"identifier": "AgentType",
"path": "modelscope_agent/agent_types.py",
"snippet": "class AgentType(str, Enum):\n\n DEFAULT = 'default'\n \"\"\"\"\"\"\n\n MS_AGENT = 'ms-agent'\n \"\"\"An agent that uses the ModelScope-agent specific format does a reasoning step before acting .\n \"\"\"\n\n MRKL = 'mrkl'\n \"\"\"An agent that does a reasoning step before acting with mrkl\"\"\"\n\n REACT = 'react'\n \"\"\"An agent that does a reasoning step before acting with react\"\"\"\n\n Messages = 'messages'\n \"\"\"An agent optimized for using open AI functions.\"\"\""
},
{
"identifier": "LLMFactory",
"path": "modelscope_agent/llm/llm_factory.py",
"snippet": "class LLMFactory:\n\n @staticmethod\n def build_llm(model_name, cfg):\n llm_type = cfg[model_name].pop('type')\n llm_cls = get_llm_cls(llm_type, model_name)\n llm_cfg = cfg[model_name]\n return llm_cls(cfg=llm_cfg)"
},
{
"identifier": "KnowledgeRetrieval",
"path": "modelscope_agent/retrieve.py",
"snippet": "class KnowledgeRetrieval(Retrieval):\n\n def __init__(self,\n docs,\n embedding: Embeddings = None,\n vs_cls: VectorStore = None,\n top_k: int = 5,\n vs_params: Dict = {}):\n super().__init__(embedding, vs_cls, top_k, vs_params)\n self.construct(docs)\n\n @classmethod\n def from_file(cls,\n file_path: Union[str, list],\n embedding: Embeddings = None,\n vs_cls: VectorStore = None,\n top_k: int = 5,\n vs_params: Dict = {}):\n\n textsplitter = CharacterTextSplitter()\n all_files = []\n if isinstance(file_path, str) and os.path.isfile(file_path):\n all_files.append(file_path)\n elif isinstance(file_path, list):\n all_files = file_path\n elif os.path.isdir(file_path):\n for root, dirs, files in os.walk(file_path):\n for f in files:\n all_files.append(os.path.join(root, f))\n else:\n raise ValueError('file_path must be a file or a directory')\n\n docs = []\n for f in all_files:\n if f.lower().endswith('.txt'):\n loader = TextLoader(f, autodetect_encoding=True)\n docs += (loader.load_and_split(textsplitter))\n elif f.lower().endswith('.md'):\n loader = UnstructuredFileLoader(f, mode='elements')\n docs += loader.load()\n elif f.lower().endswith('.pdf'):\n loader = PyPDFLoader(f)\n docs += (loader.load_and_split(textsplitter))\n else:\n print(f'not support file type: {f}, will be support soon')\n\n if len(docs) == 0:\n return None\n else:\n return cls(docs, embedding, vs_cls, top_k, vs_params)"
},
{
"identifier": "OpenAPIPluginTool",
"path": "modelscope_agent/tools/openapi_plugin.py",
"snippet": "class OpenAPIPluginTool(Tool):\n \"\"\"\n openapi schema tool\n \"\"\"\n name: str = 'api tool'\n description: str = 'This is a api tool that ...'\n parameters: list = []\n\n def __init__(self, cfg, name):\n self.name = name\n self.cfg = cfg.get(self.name, {})\n self.is_remote_tool = self.cfg.get('is_remote_tool', False)\n # remote call\n self.url = self.cfg.get('url', '')\n self.token = self.cfg.get('token', '')\n self.header = self.cfg.get('header', '')\n self.method = self.cfg.get('method', '')\n self.parameters = self.cfg.get('parameters', [])\n self.description = self.cfg.get('description',\n 'This is a api tool that ...')\n self.responses_param = self.cfg.get('responses_param', [])\n try:\n all_para = {\n 'name': self.name,\n 'description': self.description,\n 'parameters': self.parameters\n }\n self.tool_schema = ToolSchema(**all_para)\n except ValidationError:\n raise ValueError(f'Error when parsing parameters of {self.name}')\n self._str = self.tool_schema.model_dump_json()\n self._function = self.parse_pydantic_model_to_openai_function(all_para)\n\n def _remote_call(self, *args, **kwargs):\n if self.url == '':\n raise ValueError(\n f\"Could not use remote call for {self.name} since this tool doesn't have a remote endpoint\"\n )\n\n remote_parsed_input = json.dumps(\n self._remote_parse_input(*args, **kwargs))\n origin_result = None\n if self.method == 'POST':\n retry_times = MAX_RETRY_TIMES\n while retry_times:\n retry_times -= 1\n try:\n print(f'data: {kwargs}')\n print(f'header: {self.header}')\n response = requests.request(\n 'POST',\n url=self.url,\n headers=self.header,\n data=remote_parsed_input)\n\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n origin_result = json.loads(\n response.content.decode('utf-8'))\n\n final_result = self._parse_output(\n origin_result, remote=True)\n return final_result\n except Timeout:\n continue\n except RequestException as e:\n raise ValueError(\n f'Remote call failed with error code: {e.response.status_code},\\\n error message: {e.response.content.decode(\"utf-8\")}')\n\n raise ValueError(\n 'Remote call max retry times exceeded! Please try to use local call.'\n )\n elif self.method == 'GET':\n retry_times = MAX_RETRY_TIMES\n\n new_url = self.url\n matches = re.findall(r'\\{(.*?)\\}', self.url)\n for match in matches:\n if match in kwargs:\n new_url = new_url.replace('{' + match + '}', kwargs[match])\n else:\n print(\n f'The parameter {match} was not generated by the model.'\n )\n\n while retry_times:\n retry_times -= 1\n try:\n print('GET:', new_url)\n print('GET:', self.url)\n\n response = requests.request(\n 'GET',\n url=new_url,\n headers=self.header,\n params=remote_parsed_input)\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n\n origin_result = json.loads(\n response.content.decode('utf-8'))\n\n final_result = self._parse_output(\n origin_result, remote=True)\n return final_result\n except Timeout:\n continue\n except RequestException as e:\n raise ValueError(\n f'Remote call failed with error code: {e.response.status_code},\\\n error message: {e.response.content.decode(\"utf-8\")}')\n\n raise ValueError(\n 'Remote call max retry times exceeded! Please try to use local call.'\n )\n else:\n raise ValueError(\n 'Remote call method is invalid!We have POST and GET method.')\n\n def _remote_parse_input(self, *args, **kwargs):\n restored_dict = {}\n for key, value in kwargs.items():\n if '.' 
in key:\n # Split keys by \".\" and create nested dictionary structures\n keys = key.split('.')\n temp_dict = restored_dict\n for k in keys[:-1]:\n temp_dict = temp_dict.setdefault(k, {})\n temp_dict[keys[-1]] = value\n else:\n # f the key does not contain \".\", directly store the key-value pair into restored_dict\n restored_dict[key] = value\n kwargs = restored_dict\n print('传给tool的参数:', kwargs)\n return kwargs"
}
] | import copy
import os
import gradio as gr
from config_utils import parse_configuration
from custom_prompt import (DEFAULT_EXEC_TEMPLATE, DEFAULT_SYSTEM_TEMPLATE,
DEFAULT_USER_TEMPLATE, CustomPromptGenerator,
parse_role_config)
from langchain.embeddings import ModelScopeEmbeddings
from langchain.vectorstores import FAISS
from modelscope_agent.agent import AgentExecutor
from modelscope_agent.agent_types import AgentType
from modelscope_agent.llm import LLMFactory
from modelscope_agent.retrieve import KnowledgeRetrieval
from modelscope_agent.tools.openapi_plugin import OpenAPIPluginTool | 8,548 |
# init user chatbot_agent
def init_user_chatbot_agent(uuid_str=''):
builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list = parse_configuration(
uuid_str)
# set top_p and stop_words for role play
model_cfg[builder_cfg.model]['generate_cfg']['top_p'] = 0.5
model_cfg[builder_cfg.model]['generate_cfg']['stop'] = 'Observation'
# build model
print(f'using model {builder_cfg.model}')
print(f'model config {model_cfg[builder_cfg.model]}')
# # check configuration
# if builder_cfg.model in ['qwen-max', 'qwen-72b-api', 'qwen-14b-api', 'qwen-plus']:
# if 'DASHSCOPE_API_KEY' not in os.environ:
# raise gr.Error('DASHSCOPE_API_KEY should be set via setting environment variable')
try:
llm = LLMFactory.build_llm(builder_cfg.model, model_cfg)
except Exception as e:
raise gr.Error(str(e))
# build prompt with zero shot react template
|
# init user chatbot_agent
def init_user_chatbot_agent(uuid_str=''):
builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list = parse_configuration(
uuid_str)
# set top_p and stop_words for role play
model_cfg[builder_cfg.model]['generate_cfg']['top_p'] = 0.5
model_cfg[builder_cfg.model]['generate_cfg']['stop'] = 'Observation'
# build model
print(f'using model {builder_cfg.model}')
print(f'model config {model_cfg[builder_cfg.model]}')
# # check configuration
# if builder_cfg.model in ['qwen-max', 'qwen-72b-api', 'qwen-14b-api', 'qwen-plus']:
# if 'DASHSCOPE_API_KEY' not in os.environ:
# raise gr.Error('DASHSCOPE_API_KEY should be set via setting environment variable')
try:
llm = LLMFactory.build_llm(builder_cfg.model, model_cfg)
except Exception as e:
raise gr.Error(str(e))
# build prompt with zero shot react template | instruction_template = parse_role_config(builder_cfg) | 5 | 2023-12-12 04:24:00+00:00 | 12k |
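An illustrative aside on the row above: the _remote_parse_input method in the OpenAPIPluginTool snippet rebuilds nested dictionaries from dot-separated argument names before a remote call. The following is a minimal standalone sketch of that idea; the helper name restore_nested and the example values are assumptions for illustration, not part of the repo.

def restore_nested(flat_kwargs: dict) -> dict:
    # Rebuild {"user.name": "a", "user.age": 3, "q": 1}
    # into    {"user": {"name": "a", "age": 3}, "q": 1}
    restored = {}
    for key, value in flat_kwargs.items():
        if '.' in key:
            # Split keys by "." and walk/create the nested dictionaries.
            keys = key.split('.')
            node = restored
            for k in keys[:-1]:
                node = node.setdefault(k, {})
            node[keys[-1]] = value
        else:
            # Keys without "." are stored at the top level unchanged.
            restored[key] = value
    return restored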
boweniac/autogan | autogan/agents/universal_agent.py | [
{
"identifier": "AgentSwitch",
"path": "autogan/agents/agent_switch.py",
"snippet": "class AgentSwitch:\n def __init__(\n self,\n organizational_structure: List,\n task_tag: Optional[str] = \"/task\",\n opening_speaker: Optional[any] = None,\n default_agent_config: Optional[Dict] = None,\n default_super_rich: Optional[str] = None,\n default_stream_mode: Optional[bool] = None,\n response_func: Optional[ResponseFuncType]\n = default_response_func,\n ):\n \"\"\"All messages sent by agents need to be forwarded through the AgentSwitch object.\n 所有 agent 发送的消息,都需要通过 AgentSwitch 对象进行转发。\n\n **Forwarding:**\n 转发:\n\n The AgentSwitch object determines who to forward the message to based on the agent name after the @ symbol in the message.\n AgentSwitch 对象通过消息中 @ 符号后的 agent name 来判断将消息转发给谁。\n\n **Conversation domain:**\n 会话域:\n\n In each round of dialogue, the agent does not need to use all historical conversation records as its context.\n 每轮对话 agent 无需将所有的历史会话记录作为其上下文。\n\n The agent's conversation domain is based on the task. that is, the context of each round of dialogue for the agent only focuses on the historical conversation records of the current task.\n agent 的会话域以任务为基础。即 agent 每轮对话的上下文仅聚焦于当前任务的历史会话记录。\n\n **Task:**\n 任务:\n\n The AgentSwitch object determines whether the content of the message is a task through the task tag in the message.\n AgentSwitch 对象通过消息中的 task tag,来判断消息的内容是否是一个任务。\n\n If it is a task, the AgentSwitch object will call the receiver's new_task method.\n 如果是任务,AgentSwitch 对象会调用接收方的 new_task 方法。\n\n The default task tag is /task, which can be modified through the task_tag parameter when initializing the AgentSwitch object.\n task tag 默认为 /task,该值可在初始化 AgentSwitch 对象时,通过 task_tag 参数修改。\n\n **Organizational structure:**\n 组织架构:\n\n A multidimensional list containing agent objects.\n 一个包含 agent 对象的多维列表。\n\n Each list is equivalent to a department, and the first agent in the list is the leader of the department.\n 每个列表相当于一个部门,列表中的第一个 agent 为部门的 leader。\n\n Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.\n 每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。\n\n Note: There cannot be agents with the same name in the organizational structure.\n 注意:组织架构中不能有相同名称的 agent。\n\n :param organizational_structure: A multidimensional list containing agent objects.\n 一个包含 agent 对象的多维列表。\n :param opening_speaker_name: The name of the human agent invited to publish the first task.\n 被邀请发布第一个任务的人工 agent 名称。\n :param task_tag: Publish tasks to other agents by adding task_tag to the message.\n 通过在消息中添加 task_tag 来向其他 agent 发布任务。\n \"\"\"\n self.task_tag = task_tag\n self._default_agent_config = default_agent_config\n self._default_super_rich = default_super_rich\n self._default_stream_mode = default_stream_mode\n self._response_func = response_func\n self._agents = {} # key: agent name value: agent object\n\n self._init_agents(organizational_structure)\n self._init_agents_workmates(organizational_structure)\n if opening_speaker:\n self._inviting_to_speak(opening_speaker)\n\n def _init_agents(self, agent_list: list):\n for item in agent_list:\n if isinstance(item, list):\n self._init_agents(item)\n elif isinstance(item, str):\n continue\n else:\n self._agents[item.name] = item\n if item.agent_config is None and self._default_agent_config is not None:\n item.set_agent_config(self._default_agent_config)\n if item.super_rich is None and self._default_super_rich is not None:\n item.super_rich = self._default_super_rich\n if item.stream_mode is None:\n if self._default_stream_mode 
is None or self._default_stream_mode:\n item.stream_mode = True\n else:\n item.stream_mode = False\n if self._response_func:\n item.response_func = self._response_func\n\n def _init_agents_workmates(self, agent_list: list):\n \"\"\"Arrange for each agent to communicate with other agents according to the organizational structure.\n 根据组织架构,为每个 agent 安排可以与其沟通的其他 agent\n\n An agent should not exist in multiple departments.\n agent 不应存在于多个部门中\n\n :param agent_list: Organizational structure\n 组织架构\n \"\"\"\n if isinstance(agent_list[0], str):\n # The current list is workflow mode\n l = len(agent_list)\n\n for index, main_agent in enumerate(agent_list):\n # Skip the first element\n if index == 0:\n continue\n\n workmates = \"\"\n\n if index == l - 1:\n # If this is the last element\n name = \"\\\\\"\n elif isinstance(agent_list[index + 1], list):\n # If the next element is a list\n name = agent_list[index + 1][0].name\n duty = agent_list[index + 1][0].duty\n workmates = f\"\"\"\n{name} : {duty}\"\"\"\n else:\n # If the next element is agent\n name = agent_list[index + 1].name\n duty = agent_list[index + 1].duty\n workmates = f\"\"\"\n{name} : {duty}\"\"\"\n\n if isinstance(main_agent, list):\n # If the current element is a list\n self._init_agents_workmates(main_agent)\n if not main_agent[0].pipeline or main_agent[0].pipeline == \"\\\\\":\n main_agent[0].workmates += workmates\n main_agent[0].pipeline = name\n else:\n # If the current element is agent\n if not main_agent.pipeline or main_agent.pipeline == \"\\\\\":\n main_agent.workmates += workmates\n main_agent.pipeline = name\n else:\n # The current list is non-workflow mode.\n for main_agent in agent_list:\n workmates = \"\"\n\n if isinstance(main_agent, list):\n # If the current element is a list\n self._init_agents_workmates(main_agent)\n\n # If the current element is a workflow list, no hierarchical relationship is established.\n if isinstance(main_agent[0], str):\n continue\n\n # Establish a leveling relationship between current department leaders\n for agent in agent_list:\n if isinstance(agent, list):\n # If other elements are lists\n\n if isinstance(agent[0], str):\n if agent[0] == \"F\":\n # If it is a workflow\n\n # Determine whether the second element is a list.\n if isinstance(agent[1], list):\n name = agent[1][0].name\n duty = agent[1][0].duty\n else:\n name = agent[1].name\n duty = agent[1].duty\n else:\n # Skip other types of workflow\n continue\n else:\n # If it is a department\n if agent[0].name != main_agent[0].name and agent[0].duty is not None:\n name = agent[0].name\n duty = agent[0].duty\n else:\n # Skip departments that duplicate the current department\n continue\n else:\n # If other elements are agent\n name = agent.name\n duty = agent.duty\n workmates += f\"\"\"\n{name} : {duty}\"\"\"\n main_agent[0].workmates += workmates\n else:\n # If the current element is agent\n\n # Establish a level relationship of the current agent\n for agent in agent_list:\n if isinstance(agent, list):\n # If other elements are lists\n\n # Determine whether it is a department or a workflow\n if isinstance(agent[0], str):\n if agent[0] == \"F\":\n # If it is a workflow\n\n # Determine whether the second element is a list.\n if isinstance(agent[1], list):\n name = agent[1][0].name\n duty = agent[1][0].duty\n else:\n name = agent[1].name\n duty = agent[1].duty\n else:\n # Skip other types of workflow\n continue\n else:\n # If it is a department\n name = agent[0].name\n duty = agent[0].duty\n else:\n # If other elements are agent\n if agent.name 
!= main_agent.name and agent.duty is not None:\n name = agent.name\n duty = agent.duty\n else:\n # Skip the duplicate agent with the current agent\n continue\n workmates += f\"\"\"\n{name} : {duty}\"\"\"\n main_agent.workmates += workmates\n\n def _inviting_to_speak(self, invited_speaker):\n \"\"\"Invite the human agent to publish the first task\n 邀请人工 agent 发布第一个任务\n\n :param invited_speaker_name: The name of the human agent\n 人工 agent 名称。\n \"\"\"\n if invited_speaker.name not in self._agents:\n print(\"agent does not exist\")\n return\n new_task_id = self.create_time_based_uuid()\n invited_speaker.receive(self, new_task_id, \"system\", \"Please enter\", 2)\n\n def handle_and_forward(self, task_id: str, pusher_name: str, content: str,\n completion_tokens: Optional[int]):\n \"\"\"Handle messages and forward to other agent.\n 处理消息并转发给其他代理\n\n **Forwarding:**\n 转发:\n Determines who to forward the message to based on the agent name after the @ symbol in the message.\n 通过消息中 @ 符号后的 agent name 来判断将消息转发给谁。\n\n **Task:**\n 任务:\n Determines whether the content of the message is a task through the task tag in the message.\n 通过消息中的 task tag,来判断消息的内容是否是一个任务。\n\n If it is a task, will call the receiver's new_task method.\n 如果是任务,对象会调用接收方的 new_task 方法。\n\n **Conversation domain control:**\n 会话域控制:\n Translate the task id of the pusher into the task id of the receiver to connect the context.\n 将推送方的任务 id,转换为接收方的任务 id,以衔接上下文。\n\n - If the pusher is the task publisher, it is necessary to convert the task id of the pusher into the sub-task id of the receiver.\n - 如推送方为任务发布者,则需要将推送方的任务 id 转换为接收方的子任务 id。\n\n - If the pusher is executing the task published by the receiver, it is necessary to convert the task id of the pusher into the parent task id of the receiver.\n - 如推送方正在执行接收方发布的任务,则需要将推送方的任务 id 转换为接收方的上级任务 id。\n\n :param task_id: pusher task id.\n :param pusher_name: pusher_name.\n :param content: message content.\n :param completion_tokens: message content tokens.\n \"\"\"\n # Get pusher object.\n pusher = self._agents[pusher_name]\n\n # Recognize the recipient's name.\n match = re.findall(r'@(\\w+)', content)\n\n if match:\n if match[0] not in self._agents:\n # Handling the case of incorrect recipient name.\n warn = f\"@{pusher_name} {match[0]} not exist, do not @{match[0]} again, Also please do not attempt to converse with me, this is just a system message.\"\n self._response_func(\"system\", \"system\", \"\", False, 0, warn, 0, None)\n pusher.receive(self, task_id, \"system\", warn, 12)\n\n # Get receiver object.\n receiver = self._agents[match[0]]\n if re.search(fr'@\\w+ {self.task_tag}', content):\n # Generate a new task id.\n new_task_id = self.create_time_based_uuid()\n\n # Establish a relationship between the push task and the receiver task.\n pusher.sub_to_main_task_id[new_task_id] = task_id\n receiver.main_to_sub_task_id[task_id] = new_task_id\n # Create a new task.\n receiver.new_task(self, new_task_id, pusher_name, content, completion_tokens)\n else:\n switch_task_id = task_id\n if receiver.main_to_sub_task_id and task_id in receiver.main_to_sub_task_id:\n # Translate the session ID of the pusher into the sub-session ID of the receiver.\n switch_task_id = receiver.main_to_sub_task_id[task_id]\n if receiver.main_to_sub_task_id and task_id in receiver.sub_to_main_task_id:\n # Translate the session id of the sender into the superior session id of the receiver.\n switch_task_id = receiver.sub_to_main_task_id[task_id]\n if switch_task_id == task_id:\n # If no subtasks of the task from the pusher 
are found, a prompt is needed to create the task first.\n # Generate a new task id.\n new_task_id = self.create_time_based_uuid()\n\n # Establish a relationship between the push task and the receiver task.\n pusher.sub_to_main_task_id[new_task_id] = task_id\n receiver.main_to_sub_task_id[task_id] = new_task_id\n # Create a new task.\n content = content.replace(f\"@{match[0]} \", f\"@{match[0]} {self.task_tag} \")\n receiver.new_task(self, new_task_id, pusher_name, content, completion_tokens)\n else:\n receiver.receive(self, switch_task_id, pusher_name, content, completion_tokens)\n else:\n # Handling the situation where the recipient is not recognized.\n if pusher.pipeline != \"\\\\\":\n warn = f\"@{pusher_name} Any reply must start with @ + recipient's name, Also please do not attempt to converse with me, this is just a system message.\"\n self._response_func(\"system\", \"system\", \"\", False, 0, warn, 0, None)\n pusher.receive(self, task_id, \"system\", warn, 12)\n\n @staticmethod\n def create_time_based_uuid():\n # 获取当前时间的时间戳\n timestamp = time.time()\n\n # 创建一个基于时间戳的UUID\n return uuid.uuid5(uuid.NAMESPACE_DNS, str(timestamp))"
},
{
"identifier": "compressed_messages",
"path": "autogan/utils/compressed_messages_utils.py",
"snippet": "def compressed_messages(messages: List[Dict], focus: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n safe_size: Optional[int] = 4096) -> tuple[Optional[list], Optional[list], Optional[int]]:\n \"\"\"Compress Conversation Context\n 压缩会话上下文\n\n The content to be compressed is divided into: recent original conversation content, and distant content that needs to be compressed.\n 待压缩的会话内容会被分为:近期的原始会话内容、远期需要压缩的会话内容。\n\n When compressing distant conversation records, attention is focused on the 'focus'\n 在压缩远期会话记录时,会将注意力集中于 focus\n\n **Recent Original Conversation Content:**\n 近期原始会话内容:\n\n First, traverse the 'messages' in reverse order, extract the recent conversation records, until the cumulative tokens of the conversation records exceed 50% of the 'safe_size'\n 先反向遍历 messages,提取近期的会话记录,直至会话记录的累计 tokens 超过 safe_size 的 50%\n\n If the tokens of the first recent conversation record exceed 50% of the 'safe_size', then directly extract the first recent conversation record\n 如近期第一条会话记录的 tokens 就超过了 safe_size 的 50% 则直接提取近期第一条会话记录\n\n **Distant Compressed Conversation Content:**\n 远期压缩会话内容:\n\n The remaining conversation records will be compressed as distant conversation records. The size after compression is expected to be within the range of ('safe_size' - cumulative original conversation tokens)\n 剩余的会话记录将作为远期会话记录进行压缩,压缩后的大小被期望保持在 (safe_size - 累计原始会话 tokens) 范围之内\n\n If the value of 'safe_size' - cumulative original conversation tokens is less than 0, then the size after compression is expected to be 1024 tokens\n 如 safe_size - 累计原始会话 tokens 的值小于 0 则压缩后的大小被期望保持在 1024 tokens\n\n Note: The compression process does not treat messages from the 'system' role specially, and they should be excluded from 'messages'.\n 注意:压缩过程并未对 system 角色的消息进行特殊处理,应将其排除在 messages 之外。\n\n :param messages: The conversation content to be compressed, excluding 'system message' and 'focus message'. It should include 'role', 'content', 'tokens' fields.\n 待压缩的会话内容,应排除掉 system message 和 focus message。需包含 'role','content','tokens' 字段。\n :param focus: The focus direction when compressing distant conversation records\n 压缩远期会话记录时的专注方向\n :param summary_model_config: The LLM model configuration used to compress distant conversation records\n 用于压缩远期会话记录的 LLM 模型配置\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param safe_size: 'max_messages_tokens' of 'agent main model' minus the tokens of 'system message' and 'focus message'. 
When 'safe_size' is less than 0, it will be forcibly defined as 1024\n agent main model 的 max_messages_tokens 减去 system message 和 focus message 的 tokens,当 safe_size 小于 0 时,将被强制定义为 1024\n\n :return:\n --conversation_messages: The compressed conversation records, the difference from 'request_messages' is that the 'tokens' field of each message is retained\n 压缩后的会话记录,与 request_messages 的区别是保留了每条消息的 tokens 字段\n --request_messages: The message content requested to 'llm', removed the 'tokens' field of each message\n 用于向 llm 请求的消息内容,去掉了每条消息的 tokens 字段\n --total_tokens: The total tokens after compression\n 压缩后的整体tokens\n \"\"\"\n conversation_messages = []\n request_messages = []\n total_tokens = 0\n\n if len(messages) == 0:\n return None, None, None\n\n if safe_size < 0:\n safe_size = 1024\n # Reverse traverse the message to extract recent original conversation content.\n i = 0\n for message in reversed(messages):\n tokens = message[\"tokens\"]\n if total_tokens + tokens > int(safe_size * 0.5) and i != 0:\n break\n message_copy = message.copy()\n message_copy.pop('tokens', None)\n conversation_messages.insert(0, message)\n request_messages.insert(0, message_copy)\n total_tokens += tokens\n i -= 1\n # Compress the remaining messages as distant conversation records.\n if len(messages) > (i * -1):\n compressed_size = safe_size - total_tokens\n if compressed_size <= 0:\n compressed_size = 1024\n\n # 压缩剩余 messages\n content, tokens = generate_messages_summary(messages[:i], focus, summary_model_config, compressed_size, agent_name, response_func, stream_mode)\n\n if content:\n conversation_messages.insert(\n 0,\n {'role': 'assistant', 'content': f'Earlier historical conversation records: {content}',\n 'tokens': tokens}\n )\n request_messages.insert(\n 0,\n {'role': 'assistant', 'content': f'Earlier historical conversation records: {content}'}\n )\n total_tokens += tokens\n if conversation_messages and request_messages:\n return conversation_messages, request_messages, total_tokens\n else:\n return None, None, None"
},
{
"identifier": "compressed_text_universal",
"path": "autogan/utils/compressed_text_utils.py",
"snippet": "def compressed_text_universal(text: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n focus: Optional[str] = None, safe_size: Optional[int] = None) \\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Compress the text, generating either a regular summary or a cue summary.\n 压缩文本,可生成普通摘要或线索摘要。\n\n First, the long text is sliced, and then a summary is generated for each slice.\n 首先将长文本切片,然后逐切片的生成摘要。\n\n If the value of the focus parameter is not None, then the attention will be focused on the focus area while generating the summary.\n 如 focus 参数的值不为 None 则在生成摘要时注意力集中于 focus。\n\n If the value of the safe_size parameter is not None and the length of the initial compression result exceeds the safe_size, the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size.\n 如 safe_size 参数的值不为 None 且初次压缩结果长度超过 safe_size,则会对摘要进一步压缩,压缩后的大小被期望保持在 safe_size 范围之内。\n\n :param text: Text to be compressed.\n 待压缩的文本。\n :param summary_model_config: LLM configuration used for text compression.\n 用于压缩文本的 LLM 配置。\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param focus: The focus direction when compressing text.\n 压缩文本时的专注方向。\n :param safe_size: The target size of the text after compression, if not provided there is no limit.\n 文本压缩后的目标尺寸,如果为空则不做限制。\n\n :return:\n --compressed_text: The text after compression.\n 压缩后的文本。\n --total_tokens: Total tokens after compression.\n 压缩后的整体tokens。\n \"\"\"\n\n compressed_text = \"\"\n total_tokens = 0\n\n split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)\n\n for st in split_texts:\n if focus:\n content, tokens = generate_text_clues(st, focus, summary_model_config, agent_name, response_func,\n stream_mode)\n else:\n content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode)\n\n if content:\n compressed_text += content + \"\\n\"\n total_tokens += tokens\n\n if compressed_text:\n if safe_size and safe_size < total_tokens:\n return compressed_text_into_safe_size(compressed_text, safe_size, summary_model_config, agent_name,\n response_func, stream_mode)\n else:\n return compressed_text, total_tokens\n else:\n return None, None"
},
{
"identifier": "AgentConfig",
"path": "autogan/oai/config_utils.py",
"snippet": "class AgentConfig:\n \"\"\"The agent configuration includes:\n agent 配置包括:\n\n - main_model: The LLM configuration of the agent's main body.\n agent 主体的 LLM 配置。\n\n - summary_model: The LLM configuration used for compressing context and generating text summaries.\n 用于压缩上下文以及生成文本摘要的 LLM 配置。\n\n - request_interval_time: The interval time of LLM requests.\n LLM 请求间隔时间。\n\n - request_timeout:The timeout of LLM requests.\n LLM 请求超时时间。\n\n - max_retries: The maximum number of retries for LLM requests.\n LLM 请求最大重试次数。\n \"\"\"\n\n def __init__(\n self,\n config: Dict,\n ):\n model_filter = config[\"main_model\"].get(\"model_filter\", \"\")\n # main model config\n self._main_model_api_key_list = ConfigList(config[\"main_model\"][\"api_key_list\"], model_filter)\n self._main_model_max_messages_tokens = config[\"main_model\"][\"max_messages_tokens\"]\n\n # summary model config\n if \"summary_model\" in config:\n model_filter = config[\"summary_model\"].get(\"model_filter\", \"\")\n self._summary_model_api_key_list = ConfigList(config[\"summary_model\"][\"api_key_list\"], model_filter)\n self._summary_model_max_messages_tokens = config[\"summary_model\"][\"max_messages_tokens\"]\n else:\n # Use the main_model configuration when the summary_model configuration is empty.\n self._summary_model_api_key_list = self._main_model_api_key_list\n self._summary_model_max_messages_tokens = self._main_model_max_messages_tokens\n\n self._request_interval_time = config[\"request_interval_time\"]\n self._request_timeout = config[\"request_timeout\"]\n self._max_retries = config[\"max_retries\"]\n\n @property\n def main_model_config(self):\n return LLMConfig(\n self._main_model_api_key_list,\n self._main_model_max_messages_tokens,\n self._request_interval_time,\n self._request_timeout,\n self._max_retries\n )\n\n @property\n def summary_model_config(self):\n return LLMConfig(\n self._summary_model_api_key_list,\n self._summary_model_max_messages_tokens,\n self._request_interval_time,\n self._request_timeout,\n self._max_retries\n )"
},
{
"identifier": "count_text_tokens",
"path": "autogan/oai/count_tokens_utils.py",
"snippet": "def count_text_tokens(text: str, model: Optional[str] = \"gpt-3.5-turbo\") -> int:\n \"\"\"Calculate the tokens of the text.\n\n :param text: The text to be tokenized\n :param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.\n\n :return: tokens\n \"\"\"\n\n if not text:\n return 0\n\n model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']\n if model not in model_list:\n model = \"gpt-3.5-turbo\"\n\n try:\n encoding = tiktoken.encoding_for_model(model)\n num_tokens = len(encoding.encode(text))\n except Exception as e:\n print(e)\n num_tokens = 0\n\n return num_tokens"
},
{
"identifier": "generate_chat_completion",
"path": "autogan/oai/generate_utils.py",
"snippet": "def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Call the LLM interface\n\n Currently, only the chatgpt model of openai (including azure) is adapted.\n\n :param llm_config: LLM configuration.\n :param messages:\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n :param response_func: Used to return results to the interface or terminal.\n :param stream_mode:\n \"\"\"\n\n # When a certain configuration in the configuration list fails to request,\n # continue to try the next configuration until all configurations in the list are attempted.\n loop = llm_config.len_of_api_key_list\n for i in range(loop):\n time.sleep(llm_config.request_interval_time)\n api_key = llm_config.next_api_key\n try:\n completion_content = \"\"\n completion_tokens = 0\n index = 1\n for message in chat_completions(messages, api_key, llm_config.request_timeout,\n llm_config.max_retries, stream_mode):\n content = \"\"\n if stream_mode:\n if (message and \"choices\" in message and \"delta\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"delta\"]\n and message[\"choices\"][0][\"delta\"][\"content\"]):\n content = message[\"choices\"][0][\"delta\"][\"content\"]\n completion_content += content\n else:\n if (message and \"choices\" in message and \"message\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"message\"]\n and message[\"choices\"][0][\"message\"][\"content\"]):\n content = message[\"choices\"][0][\"message\"][\"content\"]\n completion_content = content\n if message and \"usage\" in message and \"completion_tokens\" in message[\"usage\"]:\n completion_tokens = message[\"usage\"][\"completion_tokens\"]\n response_func(agent_name, gen, api_key[\"model\"], stream_mode, index, content, completion_tokens, message)\n if content:\n index += 1\n\n if completion_content:\n if completion_tokens == 0:\n completion_tokens = count_text_tokens(completion_content, api_key['model'])\n return completion_content, completion_tokens\n else:\n raise ValueError(\"The return value is empty.\")\n except Exception as e:\n if i == loop - 1:\n print(f\"generate_chat_completion Exception: {e}\")\n return None, None"
},
{
"identifier": "environment_info",
"path": "autogan/utils/environment_utils.py",
"snippet": "def environment_info() -> str:\n \"\"\"Current environment information\n\n :return: --current_time: Y.m.d H:M:S week:%w\n \"\"\"\n info = f'current time: {get_time()}'\n\n return info"
},
{
"identifier": "default_response_func",
"path": "autogan/utils/response.py",
"snippet": "def default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,\n content: Optional[str], tokens: Optional[int], response: any):\n \"\"\"default response function\n 默认响应函数提供终端打印支持\n The default response function provides terminal printing support.\n\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n 用于区分 agent 回复、深思、压缩上下文、普通摘要、线索摘要\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n - system:\n - tool:\n - tool_call:\n :param model:\n :param stream_mode:\n :param index: response sequence\n :param content: completion content\n 生成内容\n :param tokens: completion tokens\n 生成内容的 tokens\n :param response: Respond to raw data\n 响应原始数据\n :return:\n \"\"\"\n if stream_mode:\n end = \"\"\n else:\n end = \"\\n\"\n\n if content:\n if gen == \"main\":\n if index == 1:\n print(f\"\\n{agent_name}: \", end=end)\n print(content, end=end)\n elif gen == \"idea\" or gen == \"tool_call\":\n if index == 1:\n print(\n colored(\n f\"\\n{agent_name}: \",\n \"cyan\",\n ),\n end=end,\n flush=True,\n )\n print(\n colored(\n content,\n \"cyan\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"system\":\n print(\n colored(\n f\"\\n{agent_name}: {content}\",\n \"red\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"tool\":\n print(\n colored(\n f\"\\n{agent_name}: {content}\",\n \"blue\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"search\":\n print(\n colored(\n f\"\\nurl: {content}\",\n \"cyan\",\n ),\n end=end,\n flush=True,\n )"
}
] | import re
from collections import defaultdict
from typing import Optional, Dict, Any
from autogan.agents.agent_switch import AgentSwitch
from autogan.utils.compressed_messages_utils import compressed_messages
from autogan.utils.compressed_text_utils import compressed_text_universal
from autogan.oai.config_utils import AgentConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.oai.generate_utils import generate_chat_completion
from autogan.utils.environment_utils import environment_info
from autogan.utils.response import default_response_func
from termcolor import colored | 9,319 |
try:
except ImportError:
def colored(x, *args, **kwargs):
return x
class UniversalAgent:
def __init__(
self,
name: str,
agent_config: Optional[Dict] = None,
duty: Optional[str] = None,
work_flow: Optional[str] = None,
use_tool: Optional[str] = None, # only | join
super_rich: Optional[str] = None, # auto | on | off
stream_mode: Optional[bool] = None,
):
"""Agent base class
Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.
每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。
To provide functions beyond the modeling capabilities for the agent, you can override the tool_function method.
想要为 agent 提供模型能力之外的功能,可以通过重写 tool_function 方法来实现。
:param name: The agent name should be unique in the organizational structure.
agent name 在组织架构中应当是唯一的。
:param agent_config: The agent configuration includes:
agent 配置包括:
- main_model: The LLM configuration of the agent's main body.
agent 主体的 LLM 配置。
- summary_model: The LLM configuration used for compressing context and generating text summaries.
用于压缩上下文以及生成文本摘要的 LLM 配置。
- request_interval_time: The interval time of LLM requests.
LLM 请求间隔时间。
- request_timeout:The timeout of LLM requests.
LLM 请求超时时间。
- max_retries: The maximum number of retries for LLM requests.
LLM 请求最大重试次数。
:param duty: Used to explain one's job responsibilities to other agents.
用于向其他 agent 说明自己的工作职责。
:param work_flow: Defines the workflow of the agent.
定义 agent 的工作流程。
:param use_tool: Defines the mode of the agent using the tool_function:
定义 agent 使用 tool_function 的模式:
- None: means not using the tool function.
不使用工具函数。
- only: Do not use the LLM, only use the tool function to generate results.
不使用 LLM,仅使用工具函数生成结果。
- join: The content generated by the LLM will be used as the input parameter for the tool_function.
LLM 生成的内容将作为 tool_function 的输入参数
:param super_rich: Whether to enable the deep thought function. When enabled,
it uses a set of analysis processes to refine the output of the agent. However,
this can increase the number of tokens used, so it is not recommended for use with the gpt-4 model.
The name "super_rich" is a reminder that using this function with gpt-4 can be expensive,
even more so than Elon Musk's earning speed.
是否开启深思功能,开启后会使用一套分析流程来收敛 agent 的输出结果,但这样做会增加 tokens 的消耗,因此不建议在gpt-4模型下使用。
之所以这个参数叫 super_rich ,是为了提醒用户,如果在 gpt-4 下使用,其花钱的速度可能会超过马斯克赚钱的速度。
- auto: Disable for GPT-4, enable for other models
在 gpt-4下禁用,其他模型开启
- on: Always enabled
始终开启
- off: Always disabled
始终关闭
:param stream_mode: Whether to enable the stream_mode
定义 agent 的工作流程。
"""
self.name = name
|
try:
except ImportError:
def colored(x, *args, **kwargs):
return x
class UniversalAgent:
def __init__(
self,
name: str,
agent_config: Optional[Dict] = None,
duty: Optional[str] = None,
work_flow: Optional[str] = None,
use_tool: Optional[str] = None, # only | join
super_rich: Optional[str] = None, # auto | on | off
stream_mode: Optional[bool] = None,
):
"""Agent base class
Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.
每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。
To provide functions beyond the modeling capabilities for the agent, you can override the tool_function method.
想要为 agent 提供模型能力之外的功能,可以通过重写 tool_function 方法来实现。
:param name: The agent name should be unique in the organizational structure.
agent name 在组织架构中应当是唯一的。
:param agent_config: The agent configuration includes:
agent 配置包括:
- main_model: The LLM configuration of the agent's main body.
agent 主体的 LLM 配置。
- summary_model: The LLM configuration used for compressing context and generating text summaries.
用于压缩上下文以及生成文本摘要的 LLM 配置。
- request_interval_time: The interval time of LLM requests.
LLM 请求间隔时间。
- request_timeout:The timeout of LLM requests.
LLM 请求超时时间。
- max_retries: The maximum number of retries for LLM requests.
LLM 请求最大重试次数。
:param duty: Used to explain one's job responsibilities to other agents.
用于向其他 agent 说明自己的工作职责。
:param work_flow: Defines the workflow of the agent.
定义 agent 的工作流程。
:param use_tool: Defines the mode of the agent using the tool_function:
定义 agent 使用 tool_function 的模式:
- None: means not using the tool function.
不使用工具函数。
- only: Do not use the LLM, only use the tool function to generate results.
不使用 LLM,仅使用工具函数生成结果。
- join: The content generated by the LLM will be used as the input parameter for the tool_function.
LLM 生成的内容将作为 tool_function 的输入参数
:param super_rich: Whether to enable the deep thought function. When enabled,
it uses a set of analysis processes to refine the output of the agent. However,
this can increase the number of tokens used, so it is not recommended for use with the gpt-4 model.
The name "super_rich" is a reminder that using this function with gpt-4 can be expensive,
even more so than Elon Musk's earning speed.
是否开启深思功能,开启后会使用一套分析流程来收敛 agent 的输出结果,但这样做会增加 tokens 的消耗,因此不建议在gpt-4模型下使用。
之所以这个参数叫 super_rich ,是为了提醒用户,如果在 gpt-4 下使用,其花钱的速度可能会超过马斯克赚钱的速度。
- auto: Disable for GPT-4, enable for other models
在 gpt-4下禁用,其他模型开启
- on: Always enabled
始终开启
- off: Always disabled
始终关闭
:param stream_mode: Whether to enable the stream_mode
定义 agent 的工作流程。
"""
self.name = name | self.agent_config = AgentConfig(agent_config) if agent_config else None | 3 | 2023-12-06 03:24:34+00:00 | 12k |
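An illustrative aside on the row above: the compressed_messages snippet keeps a recent suffix of the conversation raw (up to roughly half of safe_size tokens) and summarizes the older remainder with an LLM. The sketch below covers only that split; the helper name and the returned tuple shape are assumptions for illustration, and each message dict is assumed to carry a 'tokens' field as in the snippet.

from typing import Dict, List, Tuple

def split_recent_and_distant(messages: List[Dict], safe_size: int) -> Tuple[List[Dict], List[Dict], int]:
    # Walk messages in reverse, keeping them raw until their cumulative tokens
    # would exceed 50% of safe_size; everything earlier is returned separately
    # (the real code then compresses that distant part into a summary message).
    if safe_size < 0:
        safe_size = 1024  # same fallback as the snippet
    recent: List[Dict] = []
    total = 0
    for message in reversed(messages):
        tokens = message["tokens"]
        # Always keep at least the single most recent message.
        if recent and total + tokens > int(safe_size * 0.5):
            break
        recent.insert(0, message)
        total += tokens
    distant = messages[: len(messages) - len(recent)]
    return distant, recent, total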
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation | inference.py | [
{
"identifier": "AverageMeter",
"path": "utils/metric_util.py",
"snippet": "class AverageMeter():\r\n \"\"\" Computes and stores the average and current value \"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n \"\"\" Reset all statistics \"\"\"\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n \"\"\" Update statistics \"\"\"\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r"
},
{
"identifier": "save_img_tensor",
"path": "utils/tensor_op.py",
"snippet": "def save_img_tensor(restored,result_dir,ippath):\r\n '''\r\n :param restored: (1,C,H,W)\r\n :param result_dir:\r\n :param ippath:\r\n :return:\r\n '''\r\n restored = torch.clamp(restored, 0, 1).cpu().detach().permute(0, 2, 3, 1).squeeze(0).numpy()\r\n util.save_img(img_as_ubyte(restored),util.Generate_rp(result_dir,ippath))\r"
},
{
"identifier": "save_image_tensor",
"path": "utils/tensor_op.py",
"snippet": "def save_image_tensor(image_tensor, output_path=\"output/\"):\r\n image_np = torch_to_np(image_tensor)\r\n p = np_to_pil(image_np)\r\n p.save(output_path)\r"
},
{
"identifier": "mkdir",
"path": "utils/util.py",
"snippet": "def mkdir(path):\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r"
},
{
"identifier": "setup_logger",
"path": "utils/util.py",
"snippet": "def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):\r\n '''\r\n util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,\r\n screen=True, tofile=True)\r\n logger = logging.getLogger('base')\r\n logger.info(option.dict2str(opt))\r\n '''\r\n lg = logging.getLogger(logger_name)\r\n fmt = '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s'\r\n color_fmt = colored('%(asctime)s.%(msecs)03d','green') + '- %(levelname)s: %(message)s'\r\n formatter = logging.Formatter(fmt=color_fmt,\r\n datefmt='%y-%m-%d %H:%M:%S')\r\n lg.setLevel(level)\r\n lg.propagate = False\r\n if tofile:\r\n log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))\r\n fh = logging.FileHandler(log_file, mode='w')\r\n fh.setFormatter(formatter)\r\n lg.addHandler(fh)\r\n if screen:\r\n sh = logging.StreamHandler()\r\n sh.setFormatter(formatter)\r\n lg.addHandler(sh)\r"
},
{
"identifier": "crop_HWC_img",
"path": "utils/data_util.py",
"snippet": "def crop_HWC_img(image, base=64):\r\n \"\"\"\r\n 裁切到multiple of base的size上\r\n :param image: H,W,C\r\n :param base: (int)\r\n :return:\r\n \"\"\"\r\n h = image.shape[0]\r\n w = image.shape[1]\r\n crop_h = h % base\r\n crop_w = w % base\r\n return image[crop_h // 2:h - crop_h + crop_h // 2, crop_w // 2:w - crop_w + crop_w // 2, :]\r"
},
{
"identifier": "random_augmentation",
"path": "utils/data_util.py",
"snippet": "def random_augmentation(*args):\r\n out = []\r\n flag_aug = random.randint(0,7)\r\n for data in args:\r\n out.append(data_augmentation(data, flag_aug).copy())\r\n return out\r"
},
{
"identifier": "tensor2img",
"path": "utils/data_util.py",
"snippet": "def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):\r\n \"\"\"Convert torch Tensors into image numpy arrays.\r\n\r\n After clamping to [min, max], values will be normalized to [0, 1].\r\n\r\n Args:\r\n tensor (Tensor or list[Tensor]): Accept shapes:\r\n 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);\r\n 2) 3D Tensor of shape (3/1 x H x W);\r\n 3) 2D Tensor of shape (H x W).\r\n Tensor channel should be in RGB order.\r\n rgb2bgr (bool): Whether to change rgb to bgr.\r\n out_type (numpy type): output types. If ``np.uint8``, transform outputs\r\n to uint8 type with range [0, 255]; otherwise, float type with\r\n range [0, 1]. Default: ``np.uint8``.\r\n min_max (tuple[int]): min and max values for clamp.\r\n\r\n Returns:\r\n (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of\r\n shape (H x W). The channel order is BGR.\r\n \"\"\"\r\n if not (torch.is_tensor(tensor) or\r\n (isinstance(tensor, list)\r\n and all(torch.is_tensor(t) for t in tensor))):\r\n raise TypeError(\r\n f'tensor or list of tensors expected, got {type(tensor)}')\r\n\r\n if torch.is_tensor(tensor):\r\n tensor = [tensor]\r\n result = []\r\n for _tensor in tensor:\r\n _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)\r\n _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])\r\n\r\n n_dim = _tensor.dim()\r\n if n_dim == 4:\r\n img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()\r\n img_np = img_np.transpose(1, 2, 0)\r\n if rgb2bgr:\r\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\r\n elif n_dim == 3:\r\n img_np = _tensor.numpy()\r\n img_np = img_np.transpose(1, 2, 0)\r\n if img_np.shape[2] == 1: # gray image\r\n img_np = np.squeeze(img_np, axis=2)\r\n else:\r\n if rgb2bgr:\r\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\r\n elif n_dim == 2:\r\n img_np = _tensor.numpy()\r\n else:\r\n raise TypeError('Only support 4D, 3D or 2D tensor. '\r\n f'But received with dimension: {n_dim}')\r\n if out_type == np.uint8:\r\n # Unlike MATLAB, numpy.unit8() WILL NOT round by default.\r\n img_np = (img_np * 255.0).round()\r\n img_np = img_np.astype(out_type)\r\n result.append(img_np)\r\n if len(result) == 1:\r\n result = result[0]\r\n return result\r"
},
{
"identifier": "compute_psnr_ssim",
"path": "metrics/psnr_ssim.py",
"snippet": "def compute_psnr_ssim(recoverd, clean):\r\n \"\"\"\r\n model.output输入\r\n \"\"\"\r\n assert recoverd.shape == clean.shape\r\n recoverd = np.clip(recoverd.detach().cpu().numpy(), 0, 1)\r\n clean = np.clip(clean.detach().cpu().numpy(), 0, 1)\r\n\r\n recoverd = recoverd.transpose(0, 2, 3, 1)\r\n clean = clean.transpose(0, 2, 3, 1)\r\n psnr = 0\r\n ssim = 0\r\n\r\n for i in range(recoverd.shape[0]):\r\n # psnr_val += compare_psnr(clean[i], recoverd[i])\r\n # ssim += compare_ssim(clean[i], recoverd[i], multichannel=True)\r\n psnr += peak_signal_noise_ratio(clean[i], recoverd[i], data_range=1)\r\n ssim += structural_similarity(clean[i], recoverd[i], data_range=1, multichannel=True)\r\n\r\n return psnr / recoverd.shape[0], ssim / recoverd.shape[0], recoverd.shape[0]\r"
},
{
"identifier": "calculate_psnr",
"path": "metrics/psnr_ssim.py",
"snippet": "def calculate_psnr(img1, img2, crop_border=0, test_y_channel=False):\r\n \"\"\"img1 and img2 have range [0, 255] np.uint8\r\n tensor2img后输入\r\n crop_border (int): Cropped pixels in each edge of an image. These\r\n pixels are not involved in the PSNR calculation.\r\n test_y_channel (bool): Test on Y channel of YCbCr. Default: False.\r\n\r\n Returns:\r\n float: psnr result.\r\n \"\"\"\r\n img1 = img1.astype(np.float64)\r\n img2 = img2.astype(np.float64)\r\n if crop_border != 0:\r\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\r\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\r\n if test_y_channel:\r\n img1 = to_y_channel(img1)\r\n img2 = to_y_channel(img2)\r\n\r\n mse = np.mean((img1 - img2)**2)\r\n if mse == 0:\r\n return float('inf')\r\n return 20 * math.log10(255.0 / math.sqrt(mse))\r"
},
{
"identifier": "calculate_ssim",
"path": "metrics/psnr_ssim.py",
"snippet": "def calculate_ssim(img1, img2):\r\n '''calculate SSIM\r\n the same outputs as MATLAB's\r\n img1, img2: [0, 255]\r\n '''\r\n if not img1.shape == img2.shape:\r\n raise ValueError('Input images must have the same dimensions.')\r\n if img1.ndim == 2:\r\n return ssim(img1, img2)\r\n elif img1.ndim == 3:\r\n if img1.shape[2] == 3:\r\n ssims = []\r\n for i in range(3):\r\n ssims.append(ssim(img1, img2))\r\n return np.array(ssims).mean()\r\n elif img1.shape[2] == 1:\r\n return ssim(np.squeeze(img1), np.squeeze(img2))\r\n else:\r\n raise ValueError('Wrong input image dimensions.')\r"
},
{
"identifier": "IDR_restormer",
"path": "models/archs/IDR_restormer_arch.py",
"snippet": "class IDR_restormer(nn.Module):\n def __init__(self,\n inp_channels=3,\n out_channels=3,\n dim=48,\n num_blocks=[4, 6, 6, 8],\n num_refinement_blocks=4,\n heads=[1, 2, 4, 8],\n ffn_expansion_factor=2.66,\n bias=False,\n LayerNorm_type='WithBias', ## Other option 'BiasFree'\n num_degra_queries = 24,\n keep_degra = 48,\n degra_type = 5,\n sam = True,\n ops_type = 5,\n pred = True\n ):\n super(IDR_restormer, self).__init__()\n\n self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}\n\n self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)\n\n self.encoder_level1 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,\n LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])\n\n self.down1_2 = Downsample(dim) ## From Level 1 to Level 2\n self.encoder_level2 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])\n\n self.down2_3 = Downsample(int(dim * 2 ** 1)) ## From Level 2 to Level 3\n self.encoder_level3 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])\n\n self.down3_4 = Downsample(int(dim * 2 ** 2)) ## From Level 3 to Level 4\n self.latent = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 3), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[3])])\n\n self.up4_3 = Upsample(int(dim * 2 ** 3)) ## From Level 4 to Level 3\n self.reduce_chan_level3 = nn.Conv2d(int(dim * 2 ** 3), int(dim * 2 ** 2), kernel_size=1, bias=bias)\n self.decoder_level3 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])\n\n self.up3_2 = Upsample(int(dim * 2 ** 2)) ## From Level 3 to Level 2\n self.reduce_chan_level2 = nn.Conv2d(int(dim * 2 ** 2), int(dim * 2 ** 1), kernel_size=1, bias=bias)\n self.decoder_level2 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])\n\n self.up2_1 = Upsample(int(dim * 2 ** 1)) ## From Level 2 to Level 1 (NO 1x1 conv to reduce channels)\n\n self.decoder_level1 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])\n\n self.refinement = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_refinement_blocks)])\n\n self.output = nn.Conv2d(int(dim * 2 ** 1), out_channels, kernel_size=3, stride=1, padding=1, bias=bias)\n\n self.degra_key = nn.Parameter(torch.randn(degra_type, num_degra_queries, int(dim * 2 ** 3)), requires_grad=True)\n self.dmixer = PI_MLP_Mixer(dim=int(dim * 2 ** 3),num_degra=num_degra_queries*degra_type,keep_degra=keep_degra,init='pca')\n self.kdp_level1 = Key_TransformerBlock(dim=dim, dimkey=int(dim * 2 ** 3), 
num_heads=heads[0], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.kdp_level2 = Key_TransformerBlock(dim=int(dim * 2 ** 1), dimkey=int(dim * 2 ** 3), num_heads=heads[1], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.kdp_level3 = Key_TransformerBlock(dim=int(dim * 2 ** 2), dimkey=int(dim * 2 ** 3), num_heads=heads[2], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.cri_pix = nn.L1Loss().cuda()\n\n\n\n def forward(self, inp_img, degra_type=None, gt=None, epoch=None):\n \"\"\"\n only input_image is required during inference\n \"\"\"\n flag=0\n batch_size,c,h,w = inp_img.shape\n if epoch and epoch <= 550:\n # stage 1 training - Task-oriented knowledge collection\n de_type = degra_type[0]\n degra_id = self.de_dict[de_type]\n degra_key = self.degra_key[degra_id,:,:].unsqueeze(0).expand(batch_size,-1,-1)\n else:\n # stage 2 training - Ingredients-oriented knowedge intergation\n if flag==0:\n U,S,V = process_USV(self.degra_key.detach())\n flag=1\n U,V = self.dmixer(U,V,batch_size)\n degra_key = [U,S,V]\n de_type = None\n\n\n inp_enc_level1 = self.patch_embed(inp_img)\n out_enc_level1 = self.encoder_level1(inp_enc_level1)\n torch_resize1 = Resize([out_enc_level1.shape[2],out_enc_level1.shape[3]])\n inp_img1 = torch_resize1(inp_img)\n out_enc_level1,output_img1,pred1 = self.kdp_level1(out_enc_level1,degra_key,inp_img1,degra_type=de_type)\n\n inp_enc_level2 = self.down1_2(out_enc_level1)\n out_enc_level2 = self.encoder_level2(inp_enc_level2)\n torch_resize2 = Resize([out_enc_level2.shape[2],out_enc_level2.shape[3]])\n inp_img2 = torch_resize2(inp_img)\n out_enc_level2,output_img2,pred2 = self.kdp_level2(out_enc_level2,degra_key,inp_img2,degra_type=de_type)\n\n inp_enc_level3 = self.down2_3(out_enc_level2)\n out_enc_level3 = self.encoder_level3(inp_enc_level3)\n torch_resize3 = Resize([out_enc_level3.shape[2],out_enc_level3.shape[3]])\n inp_img3 = torch_resize3(inp_img)\n out_enc_level3,output_img3,pred3 = self.kdp_level3(out_enc_level3,degra_key,inp_img3,degra_type=de_type)\n\n inp_enc_level4 = self.down3_4(out_enc_level3)\n latent = self.latent(inp_enc_level4)\n\n inp_dec_level3 = self.up4_3(latent)\n inp_dec_level3 = torch.cat([inp_dec_level3, out_enc_level3], 1)\n inp_dec_level3 = self.reduce_chan_level3(inp_dec_level3)\n out_dec_level3 = self.decoder_level3(inp_dec_level3)\n\n inp_dec_level2 = self.up3_2(out_dec_level3)\n inp_dec_level2 = torch.cat([inp_dec_level2, out_enc_level2], 1)\n inp_dec_level2 = self.reduce_chan_level2(inp_dec_level2)\n out_dec_level2 = self.decoder_level2(inp_dec_level2)\n\n inp_dec_level1 = self.up2_1(out_dec_level2)\n inp_dec_level1 = torch.cat([inp_dec_level1, out_enc_level1], 1)\n out_dec_level1 = self.decoder_level1(inp_dec_level1)\n\n out_dec_level1 = self.refinement(out_dec_level1)\n out_dec_level1 = self.output(out_dec_level1) + inp_img\n \n if gt is not None:\n gt_img1 = torch_resize1(gt)\n gt_img2 = torch_resize2(gt)\n gt_img3 = torch_resize3(gt)\n output_img = [output_img1,output_img2,output_img3] \n gt_img = [gt_img1,gt_img2,gt_img3] \n loss = np.sum([self.cri_pix(output_img[j],gt_img[j]) for j in range(len(output_img))])\n return [out_dec_level1,loss,pred1,pred2,pred3]\n else:\n return out_dec_level1"
}
] | import argparse
import subprocess
import numpy as np
import os
import torch
import torch.nn as nn
import logging
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.metric_util import AverageMeter
from utils.tensor_op import save_img_tensor, save_image_tensor
from utils.util import mkdir, setup_logger
from utils.data_util import crop_HWC_img, random_augmentation, tensor2img
from metrics.psnr_ssim import compute_psnr_ssim, calculate_psnr, calculate_ssim
from models.archs.IDR_restormer_arch import IDR_restormer | 7,498 | self._init_input_ids()
def _edgeComputation(self,x):
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def __getitem__(self, idx):
degraded_path = self.ids[idx]
clean_path = self._get_gt_path(degraded_path)
degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32)
clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32)
clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img)
degraded_name = degraded_path.split('/')[-1][:-4]
return [degraded_name], degraded_img, clean_img
def __len__(self):
return self.length
def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + 'denoise/' + str(sigma) + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
dataset.set_sigma(sigma)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([clean_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + clean_name[0] + '.png')
logger.info("Deonise sigma=%d: psnr: %.2f, ssim: %.4f" % (sigma, psnr.avg, ssim.avg))
def test_Derain_Dehaze(net, dataset, task="derain",save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + task + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([degraded_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
N = degrad_patch.shape[0]
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + degraded_name[0] + '.png')
logger.info("PSNR: %.2f, SSIM: %.4f" % (psnr.avg, ssim.avg))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Parameters
parser.add_argument('--cuda', type=int, default=0)
parser.add_argument('--mode', type=int, default=0,
                        help='0 for 5 tasks, 1 for denoising details, 2 for unknown UDC')
parser.add_argument('--denoise_CBSD68_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_urban100_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_Kodak24_path', type=str, default="", help='save path of test noisy images')
    parser.add_argument('--derain_path', type=str, default="", help='save path of test rainy images')
parser.add_argument('--dehaze_path', type=str, default="", help='save path of test hazy images')
parser.add_argument('--deblur_path', type=str, default="", help='save path of test blur images')
parser.add_argument('--low_light_path', type=str, default="", help='save path of test low-light images')
parser.add_argument('--udc_T_path', type=str, default="", help='save path of test udc Toled images')
parser.add_argument('--udc_P_path', type=str, default="", help='save path of test udc Poled images')
parser.add_argument('--output_path', type=str, default="./results/visualization", help='output save path')
parser.add_argument('--ckpt_path', type=str, default="", help='checkpoint save path')
    parser.add_argument('--log_path', type=str, default="./results/log", help='log save path')
opt = parser.parse_args()
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.set_device(opt.cuda)
denoise_set = DenoiseTestDataset(opt)
derain_set = DerainDehazeDataset(opt)
# Make network
|
class DenoiseTestDataset(Dataset):
def __init__(self, args, dataset="CBSD68"):
super(DenoiseTestDataset, self).__init__()
self.args = args
self.clean_ids = []
self.sigma = 15
self.dataset_dict = {'CBSD68': 0, 'urban100': 1, 'Kodak24':2}
self.set_dataset(dataset)
self.toTensor = ToTensor()
def _init_clean_ids(self):
if self.task_idx == 0:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_CBSD68_path)
self.clean_ids += [self.args.denoise_CBSD68_path + id_ for id_ in name_list]
elif self.task_idx == 1:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_urban100_path)
self.clean_ids += [self.args.denoise_urban100_path + id_ for id_ in name_list]
elif self.task_idx == 2:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_Kodak24_path)
self.clean_ids += [self.args.denoise_Kodak24_path + id_ for id_ in name_list]
self.num_clean = len(self.clean_ids)
def set_dataset(self, dataset):
self.task_idx = self.dataset_dict[dataset]
self._init_clean_ids()
def _add_gaussian_noise(self, clean_patch):
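        # Synthesize the noisy input: add i.i.d. standard-normal noise scaled by sigma, clip to [0, 255], cast to uint8.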
noise = np.random.randn(*clean_patch.shape)
noisy_patch = np.clip(clean_patch + noise * self.sigma, 0, 255).astype(np.uint8)
return noisy_patch, clean_patch
def _edgeComputation(self,x):
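        # Rough per-pixel edge map: sum absolute horizontal/vertical neighbor differences, average over the 3 RGB channels, then divide by the 4 neighbor contributions.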
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def set_sigma(self, sigma):
self.sigma = sigma
def __getitem__(self, clean_id):
clean_img = crop_HWC_img(np.array(Image.open(self.clean_ids[clean_id]).convert('RGB')), base=32)
clean_name = self.clean_ids[clean_id].split("/")[-1].split('.')[0]
noisy_img, _ = self._add_gaussian_noise(clean_img)
clean_img, noisy_img = self.toTensor(clean_img), self.toTensor(noisy_img)
return [clean_name], noisy_img, clean_img
def __len__(self):
return self.num_clean
class DerainDehazeDataset(Dataset):
def __init__(self, args, task="derain"):
super(DerainDehazeDataset, self).__init__()
self.ids = []
self.task_idx = 0
self.args = args
self.task_dict = {'derain': 0, 'dehaze': 1, 'deblur':2, 'low-light':3, 'UDC_T':4, 'UDC_P':5}
self.toTensor = ToTensor()
self.set_dataset(task)
def _init_input_ids(self):
if self.task_idx == 0:
self.ids = []
name_list = os.listdir(self.args.derain_path + 'input/')
self.ids += [self.args.derain_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 1:
self.ids = []
name_list = os.listdir(self.args.dehaze_path + 'input/')
self.ids += [self.args.dehaze_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 2:
self.ids = []
name_list = os.listdir(self.args.deblur_path + 'input/')
self.ids += [self.args.deblur_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 3:
self.ids = []
name_list = os.listdir(self.args.low_light_path + 'input/')
self.ids += [self.args.low_light_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 4:
self.ids = []
name_list = os.listdir(self.args.udc_T_path + 'input/')
self.ids += [self.args.udc_T_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 5:
self.ids = []
name_list = os.listdir(self.args.udc_P_path + 'input/')
self.ids += [self.args.udc_P_path + 'input/' + id_ for id_ in name_list]
self.length = len(self.ids)
def _get_gt_path(self, degraded_name):
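        # Map a degraded input path to its ground-truth path; dehaze targets additionally drop the per-image suffix after the underscore.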
if self.task_idx == 0:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 1:
dir_name = degraded_name.split("input")[0] + 'target/'
name = degraded_name.split('/')[-1].split('_')[0] + '.png'
gt_name = dir_name + name
elif self.task_idx == 2:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 3:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 4:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 5:
gt_name = degraded_name.replace("input", "target")
return gt_name
def set_dataset(self, task):
self.task_idx = self.task_dict[task]
self._init_input_ids()
def _edgeComputation(self,x):
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def __getitem__(self, idx):
degraded_path = self.ids[idx]
clean_path = self._get_gt_path(degraded_path)
degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32)
clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32)
clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img)
degraded_name = degraded_path.split('/')[-1][:-4]
return [degraded_name], degraded_img, clean_img
def __len__(self):
return self.length
def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True):
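    # Evaluate the network on synthetic Gaussian noise at the given sigma, accumulating per-image PSNR/SSIM and logging the averages.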
logger = logging.getLogger('base')
output_path = opt.output_path + 'denoise/' + str(sigma) + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
dataset.set_sigma(sigma)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([clean_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + clean_name[0] + '.png')
logger.info("Deonise sigma=%d: psnr: %.2f, ssim: %.4f" % (sigma, psnr.avg, ssim.avg))
def test_Derain_Dehaze(net, dataset, task="derain",save_img=True):
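    # Shared evaluation loop for the paired-image tasks (derain, dehaze, deblur, low-light, UDC); logs average PSNR/SSIM over the test set.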
logger = logging.getLogger('base')
output_path = opt.output_path + task + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([degraded_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
N = degrad_patch.shape[0]
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + degraded_name[0] + '.png')
logger.info("PSNR: %.2f, SSIM: %.4f" % (psnr.avg, ssim.avg))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Parameters
parser.add_argument('--cuda', type=int, default=0)
parser.add_argument('--mode', type=int, default=0,
                        help='0 for 5 tasks, 1 for denoising details, 2 for unknown UDC')
parser.add_argument('--denoise_CBSD68_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_urban100_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_Kodak24_path', type=str, default="", help='save path of test noisy images')
    parser.add_argument('--derain_path', type=str, default="", help='save path of test rainy images')
parser.add_argument('--dehaze_path', type=str, default="", help='save path of test hazy images')
parser.add_argument('--deblur_path', type=str, default="", help='save path of test blur images')
parser.add_argument('--low_light_path', type=str, default="", help='save path of test low-light images')
parser.add_argument('--udc_T_path', type=str, default="", help='save path of test udc Toled images')
parser.add_argument('--udc_P_path', type=str, default="", help='save path of test udc Poled images')
parser.add_argument('--output_path', type=str, default="./results/visualization", help='output save path')
parser.add_argument('--ckpt_path', type=str, default="", help='checkpoint save path')
    parser.add_argument('--log_path', type=str, default="./results/log", help='log save path')
opt = parser.parse_args()
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.set_device(opt.cuda)
denoise_set = DenoiseTestDataset(opt)
derain_set = DerainDehazeDataset(opt)
# Make network | net = IDR_restormer(inp_channels=3, out_channels=3, dim=24, num_blocks=[2,3,3,4], num_refinement_blocks=2, heads=[1,2,4,8], ffn_expansion_factor=2.66, bias=False, LayerNorm_type='WithBias', num_degra_queries = 24, keep_degra=48) | 11 | 2023-12-07 10:58:34+00:00 | 12k |
neu-spiral/multi-label-emg | multi_label_emg/train.py | [
{
"identifier": "load_data_dict",
"path": "multi_label_emg/data.py",
"snippet": "def load_data_dict():\n \"\"\"\n Loads features and labels from subject folders into a single dictionary as described below.\n NOTE - preprocessing should be been done first to extract features from raw data (see README).\n\n data_dict = {\n Subj0: {\n Calibration_features: ...,\n Calibration_dir_labels: ...,\n Calibration_mod_labels: ...,\n Calibration_visual_dir_labels: ...,\n Calibration_visual_mod_labels: ...,\n SimultaneousPulse1_NoFeedback_features: ...,\n ...\n },\n ...\n }\n \"\"\"\n\n blocks = [\"Calibration\"]\n for i in [1, 2, 3]:\n for feedback in [\"NoFeedBack\", \"WithFeedBack\"]:\n blocks.append(f\"SimultaneousPulse{i}_{feedback}\")\n blocks.append(f\"HoldPulse{i}_{feedback}\")\n\n results = {}\n for i in trange(11, desc=\"Load Subjects\", leave=True):\n results[f\"Subj{i}\"] = {}\n for block in tqdm(blocks, leave=False, position=1):\n path = DATASET_DIR / \"python\" / f\"Subj{i}\" / block\n # NOTE - features.npy is created during preprocessing script\n results[f\"Subj{i}\"][f\"{block}_features\"] = np.load(path / \"features.npy\")\n results[f\"Subj{i}\"][f\"{block}_dir_labels\"] = np.load(path / \"joystick_direction_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_mod_labels\"] = np.load(path / \"joystick_modifier_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_visual_dir_labels\"] = np.load(path / \"visual_direction_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_visual_mod_labels\"] = np.load(path / \"visual_modifier_labels.npy\")\n return results"
},
{
"identifier": "AvgPairs",
"path": "multi_label_emg/models.py",
"snippet": "class AvgPairs:\n \"\"\"Create fake doubles by averaging pairs of singles. New items have hard labels including both classes\"\"\"\n\n def __init__(self, n_per_class: int):\n self.n_per_class = n_per_class\n\n def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):\n \"\"\"\n Args:\n x_single: (n_samples_in, n_features) - data/features from single gestures\n y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures\n y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures\n\n Returns:\n x_prime: (n_samples_aug, n_features) - augmented data\n y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels\n y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels\n \"\"\"\n x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)\n x_aug, y_dir_aug, y_mod_aug = [], [], []\n for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):\n x_aug.append((x1 + x2) / 2)\n y_dir_aug.append(y1)\n y_mod_aug.append(y2)\n x_aug = np.stack(x_aug)\n y_dir_aug = np.stack(y_dir_aug)\n y_mod_aug = np.stack(y_mod_aug)\n\n if self.n_per_class > 0:\n # For each combination class, truncate to self.n_per_class\n res_x, res_y_dir, res_y_mod = [], [], []\n for d in np.unique(y_dir_aug, axis=0):\n for m in np.unique(y_mod_aug, axis=0):\n idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]\n perm = np.random.permutation(len(idx))\n res_x.append(x_aug[idx[perm[: self.n_per_class]]])\n res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])\n res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])\n\n x_aug = np.concatenate(res_x)\n y_dir_aug = np.concatenate(res_y_dir)\n y_mod_aug = np.concatenate(res_y_mod)\n\n return x_aug, y_dir_aug, y_mod_aug\n\n def __repr__(self):\n return f\"{type(self).__name__}(n_per_class={self.n_per_class})\""
},
{
"identifier": "ElementwiseMaxPairs",
"path": "multi_label_emg/models.py",
"snippet": "class ElementwiseMaxPairs:\n \"\"\"Create fake doubles by taking elementwise max of each feature.\n New items have hard labels including both classes\"\"\"\n\n def __init__(self, n_per_class: int):\n self.n_per_class = n_per_class\n\n def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):\n \"\"\"\n Args:\n x_single: (n_samples_in, n_features) - data/features from single gestures\n y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures\n y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures\n\n Returns:\n x_prime: (n_samples_aug, n_features) - augmented data\n y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels\n y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels\n \"\"\"\n x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)\n x_aug, y_dir_aug, y_mod_aug = [], [], []\n for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):\n x_aug.append(np.maximum(x1, x2))\n y_dir_aug.append(y1)\n y_mod_aug.append(y2)\n x_aug = np.stack(x_aug)\n y_dir_aug = np.stack(y_dir_aug)\n y_mod_aug = np.stack(y_mod_aug)\n\n if self.n_per_class > 0:\n # For each combination class, truncate to self.n_per_class\n res_x, res_y_dir, res_y_mod = [], [], []\n for d in np.unique(y_dir_aug, axis=0):\n for m in np.unique(y_mod_aug, axis=0):\n idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]\n perm = np.random.permutation(len(idx))\n res_x.append(x_aug[idx[perm[: self.n_per_class]]])\n res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])\n res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])\n\n x_aug = np.concatenate(res_x)\n y_dir_aug = np.concatenate(res_y_dir)\n y_mod_aug = np.concatenate(res_y_mod)\n\n return x_aug, y_dir_aug, y_mod_aug\n\n def __repr__(self):\n return f\"{type(self).__name__}(n_per_class={self.n_per_class})\""
},
{
"identifier": "ParallelA",
"path": "multi_label_emg/models.py",
"snippet": "class ParallelA(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelA.pkl\"\n\n def __init__(\n self,\n dir_clf,\n mod_clf,\n use_augmentation: bool,\n n_aug_per_class: int = -1,\n include_rest_data_for_clf: bool = False,\n ):\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n self.use_augmentation = use_augmentation\n self.n_aug_per_class = n_aug_per_class\n self._n_aug_created = None\n self.include_rest_data_for_clf = include_rest_data_for_clf\n\n def get_params(self, deep=True):\n return {\n \"dir_clf\": self.dir_clf,\n \"mod_clf\": self.mod_clf,\n \"use_augmentation\": self.use_augmentation,\n \"n_aug_per_class\": self.n_aug_per_class,\n \"include_rest_data_for_clf\": self.include_rest_data_for_clf,\n }\n\n def fit(self, features, y_dir, y_mod):\n if self.use_augmentation:\n aug = AvgPairs(self.n_aug_per_class)\n aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)\n features = np.concatenate([features, aug_features])\n y_dir = np.concatenate([y_dir, aug_dir_labels])\n y_mod = np.concatenate([y_mod, aug_mod_labels])\n self._n_aug_created = len(aug_features)\n\n if y_dir.ndim == 2:\n y_dir = y_dir.argmax(-1)\n if y_mod.ndim == 2:\n y_mod = y_mod.argmax(-1)\n\n if self.include_rest_data_for_clf:\n # In this case, the label (NoDir, NoMod) could mean \"active and doesn't fit our classes\" or \"resting\"\n self.dir_clf.fit(features, y_dir)\n self.mod_clf.fit(features, y_mod)\n else:\n # In this case, the label (NoDir, NoMod) means \"active and doesn't fit classes\".\n # \"Rest\" data is out-of-domain\n active_idx = np.logical_or(y_dir != NO_DIR_IDX, y_mod != NO_MOD_IDX)\n active_features = features[active_idx]\n active_y_dir = y_dir[active_idx]\n active_y_mod = y_mod[active_idx]\n\n self.dir_clf.fit(active_features, active_y_dir)\n self.mod_clf.fit(active_features, active_y_mod)\n return self\n\n def predict_proba(self, features):\n \"\"\"Only for gestures\"\"\"\n dir_probs = self.dir_clf.predict_proba(features)\n mod_probs = self.mod_clf.predict_proba(features)\n return dir_probs, mod_probs\n\n def predict(self, features):\n \"\"\"features.shape == (n_channels, n_samples) or (n_trials, n_channels, n_samples)\"\"\"\n dir_probs = self.dir_clf.predict_proba(features)\n mod_probs = self.mod_clf.predict_proba(features)\n return dir_probs.argmax(-1), mod_probs.argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelA\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(dir_clf={self.dir_clf}, \"\n f\"use_augmentation={self.use_augmentation}, \"\n f\"n_aug_per_class={self.n_aug_per_class}, \"\n + f\"mod_clf={self.mod_clf}, \"\n + f\"include_rest_data_for_clf={self.include_rest_data_for_clf})\"\n )"
},
{
"identifier": "ParallelB",
"path": "multi_label_emg/models.py",
"snippet": "class ParallelB(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelB.pkl\"\n\n def __init__(\n self,\n dir_clf,\n mod_clf,\n has_dir_clf,\n has_mod_clf,\n use_augmentation: bool,\n n_aug_per_class: int = -1,\n ):\n self.has_dir_clf = has_dir_clf\n self.has_mod_clf = has_mod_clf\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n self.use_augmentation = use_augmentation\n self.n_aug_per_class = n_aug_per_class\n self._n_aug_created = None\n\n def get_params(self, deep=True):\n return {\n \"dir_clf\": self.dir_clf,\n \"mod_clf\": self.mod_clf,\n \"has_dir_clf\": self.dir_clf,\n \"has_mod_clf\": self.mod_clf,\n \"use_augmentation\": self.use_augmentation,\n \"n_aug_per_class\": self.n_aug_per_class,\n }\n\n def fit(self, features, y_dir, y_mod):\n if self.use_augmentation:\n aug = AvgPairs(self.n_aug_per_class)\n aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)\n features = np.concatenate([features, aug_features])\n y_dir = np.concatenate([y_dir, aug_dir_labels])\n y_mod = np.concatenate([y_mod, aug_mod_labels])\n self._n_aug_created = len(aug_features)\n\n if y_dir.ndim == 2:\n y_dir = y_dir.argmax(-1)\n if y_mod.ndim == 2:\n y_mod = y_mod.argmax(-1)\n has_direction = y_dir != NO_DIR_IDX\n has_modifier = y_mod != NO_MOD_IDX\n # Event check\n self.has_dir_clf.fit(features, has_direction.astype(int))\n self.has_mod_clf.fit(features, has_modifier.astype(int))\n # Direction and modifier\n self.dir_clf.fit(features[has_direction], y_dir[has_direction])\n self.mod_clf.fit(features[has_modifier], y_mod[has_modifier])\n return self\n\n def predict_proba(self, features):\n p_has_direction = self.has_dir_clf.predict_proba(features)\n p_has_modifier = self.has_mod_clf.predict_proba(features)\n\n p_dir_probs = self.dir_clf.predict_proba(features)\n p_mod_probs = self.mod_clf.predict_proba(features)\n\n # Check probs\n dir_probs = np.zeros((features.shape[0], 5))\n mod_probs = np.zeros((features.shape[0], 3))\n dir_probs[:, NO_DIR_IDX] = p_has_direction[:, 0] # p(no_direction | x)\n mod_probs[:, NO_MOD_IDX] = p_has_modifier[:, 0] # p(no_modifier | x)\n dir_probs[:, :NO_DIR_IDX] = np.multiply(\n p_dir_probs, p_has_direction[:, 1][..., None]\n ) # p(direction | has_direction)\n mod_probs[:, :NO_MOD_IDX] = np.multiply(\n p_mod_probs, p_has_modifier[:, 1][..., None]\n ) # p(modifier | has_modifier)\n assert np.allclose(dir_probs.sum(-1), 1) and np.allclose(mod_probs.sum(-1), 1), \"Probabilities should sum to 1\"\n # return probs\n \"\"\"Only for gestures\"\"\"\n return dir_probs, mod_probs\n\n def predict(self, features):\n dir_probs, mod_probs = self.predict_proba(features)\n return dir_probs.argmax(-1), mod_probs.argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelB\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(has_dir_clf={self.has_dir_clf}, \"\n f\"dir_clf={self.dir_clf}, \"\n f\"use_augmentation={self.use_augmentation}, \"\n f\"n_aug_per_class={self.n_aug_per_class}, \"\n f\"has_mod_clf={self.has_mod_clf}),\"\n f\"mod_clf={self.mod_clf})\"\n )"
},
{
"identifier": "NO_DIR_IDX",
"path": "multi_label_emg/utils.py",
"snippet": "NO_DIR_IDX = len(DIRECTION_GESTURES) # When predicting direction, we have an extra class representing \"None\""
},
{
"identifier": "NO_MOD_IDX",
"path": "multi_label_emg/utils.py",
"snippet": "NO_MOD_IDX = len(MODIFIER_GESTURES)"
},
{
"identifier": "RESULTS_DIR",
"path": "multi_label_emg/utils.py",
"snippet": "RESULTS_DIR = PROJECT_ROOT.parent / \"results\" # For experiment outputs and figures"
},
{
"identifier": "canonical_coords",
"path": "multi_label_emg/utils.py",
"snippet": "def canonical_coords():\n \"\"\"NOTE - order does not matter: (Up, Pinch) and (Pinch, Up) are both labeled as (Up, Pinch)\n Make a list table so we can convert:\n from integer labels such as (0, 1),\n to an index in confusion matrix and a string label\"\"\"\n result_int = []\n result_str = []\n\n # Add (<DIR>, NoMod) items\n for i, d in enumerate(DIRECTION_GESTURES):\n result_int.append((i, NO_MOD_IDX))\n result_str.append(f\"({d}, NoMod)\")\n\n # Add (NoDir, <MOD>) items\n for i, m in enumerate(MODIFIER_GESTURES):\n result_int.append((NO_DIR_IDX, i))\n result_str.append(f\"(NoDir, {m})\")\n\n # Add (<DIR>, <MOD>) items\n for i, d in enumerate(DIRECTION_GESTURES):\n for j, m in enumerate(MODIFIER_GESTURES):\n result_int.append((i, j))\n result_str.append(f\"({d}, {m})\")\n\n # Add the (NoDir, NoMod) item\n result_int.append((NO_DIR_IDX, NO_MOD_IDX))\n result_str.append(\"(NoDir, NoMod)\")\n\n return result_int, result_str"
},
{
"identifier": "confusion_matrix",
"path": "multi_label_emg/utils.py",
"snippet": "def confusion_matrix(y_true_2d, y_pred_2d, normalize_rows=True):\n \"\"\"\n Number of classes = 4 direction + 2 modifier + 4*2 combinations + (NoDir, NoMod) = 15\n Create a confusion matrix of shape (15, 15), arranged according to the canonical\n coordinates above\n\n NOTE - result may contain nans - use nanmean later\n \"\"\"\n coords, coords_str = canonical_coords()\n\n cm = np.zeros((len(coords), len(coords)), dtype=int)\n for yt, yp in zip(y_true_2d, y_pred_2d):\n cm[coords.index(tuple(yt)), coords.index(tuple(yp))] += 1\n if normalize_rows:\n cm = cm.astype(float)\n with np.errstate(all=\"ignore\"): # Ignore division by zero for empty rows\n cm /= cm.sum(axis=-1, keepdims=True)\n return cm"
},
{
"identifier": "str2bool",
"path": "multi_label_emg/utils.py",
"snippet": "def str2bool(s):\n if s.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif s.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise ValueError(\"Boolean value expected.\")"
}
] | import sys
import numpy as np
import plotly.graph_objects as go
import argparse
from loguru import logger
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KernelDensity, KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.svm import SVC
from multi_label_emg.data import load_data_dict
from multi_label_emg.models import AvgPairs, ElementwiseMaxPairs, ParallelA, ParallelB
from multi_label_emg.utils import (
NO_DIR_IDX,
NO_MOD_IDX,
RESULTS_DIR,
canonical_coords,
confusion_matrix,
str2bool,
) | 7,823 |
# Don't use "Feedback" blocks for this analysis
test_blocks = ["HoldPulse3_NoFeedBack", "SimultaneousPulse3_NoFeedBack"]
test_features = np.concatenate([data[f"{block}_features"] for block in test_blocks])
test_dir_labels = np.concatenate([data[f"{block}_dir_labels"] for block in test_blocks])
test_mod_labels = np.concatenate([data[f"{block}_mod_labels"] for block in test_blocks])
logger.info(f"test set: {test_features.shape=}, {test_dir_labels.shape=}, {test_mod_labels.shape=}")
# Vary strategy for augmented doubles
double_features_aug, double_dir_labels_aug, double_mod_labels_aug = get_augmented_doubles(
doubles_method,
feature_combine_type,
fraction_doubles_per_class,
train_features,
train_dir_labels,
train_mod_labels,
)
# Make augmented singles
# Figure out how many doubles per class. Take avg and then apply rel_fraction_singles_per_class to
# get the number of singles per class
n_singles_per_class = 0
if singles_method != "none":
doubles_labels_2d = np.stack((double_dir_labels_aug.argmax(-1), double_mod_labels_aug.argmax(-1)), axis=-1)
class_sizes = np.unique(doubles_labels_2d, axis=0, return_counts=True)[-1]
n_singles_per_class = int(np.round(np.mean(class_sizes) * rel_fraction_singles_per_class))
single_features_aug, single_dir_labels_aug, single_mod_labels_aug = get_augmented_singles(
singles_method, n_singles_per_class, train_features, train_dir_labels, train_mod_labels
)
# Merge all train data
train_features = np.concatenate([train_features, double_features_aug, single_features_aug])
train_dir_labels = np.concatenate([train_dir_labels, double_dir_labels_aug, single_dir_labels_aug])
train_mod_labels = np.concatenate([train_mod_labels, double_mod_labels_aug, single_mod_labels_aug])
logger.info(f"Augmented train set: {train_features.shape=}, {train_dir_labels.shape=}, {train_mod_labels.shape=}")
# Create model
if parallel_model_type == "ParallelA":
model = ParallelA(
get_clf(clf_name, num_classes=5),
get_clf(clf_name, num_classes=3),
use_augmentation=False,
include_rest_data_for_clf=True,
)
elif parallel_model_type == "ParallelB":
model = ParallelB(
dir_clf=get_clf(clf_name, num_classes=4),
mod_clf=get_clf(clf_name, num_classes=2),
has_dir_clf=get_clf(clf_name, num_classes=2),
has_mod_clf=get_clf(clf_name, num_classes=2),
use_augmentation=False,
# include_rest_data_for_clf=True, # NOTE - always using true, flag is not in model
)
elif parallel_model_type == "SerialControl":
model = get_clf(clf_name, num_classes=15)
else:
raise ValueError(f"Unknown parallel model type: {parallel_model_type}")
# Train
logger.info("Train...")
if parallel_model_type == "SerialControl":
# Convert labels to integer by making 2-digit numbers,
# where the 10s place is the dir label and the 1s place is the mod label
train_labels = train_dir_labels.argmax(-1) * 10 + train_mod_labels.argmax(-1)
model.fit(train_features, train_labels)
else:
model.fit(train_features, train_dir_labels, train_mod_labels)
# Evaluate
logger.info("Evaluate")
if parallel_model_type == "SerialControl":
combined_preds = model.predict(test_features)
dir_preds = combined_preds // 10
mod_preds = combined_preds % 10
else:
dir_preds, mod_preds = model.predict(test_features)
preds_2d = np.stack([dir_preds, mod_preds], axis=-1)
true_labels_2d = np.stack([test_dir_labels.argmax(-1), test_mod_labels.argmax(-1)], axis=-1)
return confusion_matrix(true_labels_2d, preds_2d)
if __name__ == "__main__":
logger.remove()
logger.add(sys.stdout, level="INFO", colorize=True)
parser = argparse.ArgumentParser()
parser.add_argument("--subject", type=str, required=True)
parser.add_argument("--seed", type=int, required=True)
parser.add_argument("--parallel_model_type", choices=["ParallelA", "ParallelB", "SerialControl"], required=True)
clf_names = ["mlp", "rf", "logr"]
parser.add_argument("--clf_name", type=str, choices=clf_names, required=True)
doubles_methods = [
"none",
"subset_uniform",
"subset_near_mean",
"subset_spaced_quantiles",
"subsetInput_uniform",
"subsetInput_near_mean",
"subsetInput_spaced_quantiles",
"all",
]
parser.add_argument("--doubles_method", type=str, choices=doubles_methods, required=True)
parser.add_argument("--fraction_doubles_per_class", type=float, required=True)
singles_methods = [
"none",
"add-gaussian-0.3",
"add-gaussian-0.4",
"add-gaussian-0.5",
"fit-gmm-1",
"fit-gmm-5",
"fit-gmm-10",
"fit-kde-gaussian-silverman",
"fit-kde-gaussian-0.01",
"fit-kde-gaussian-0.1",
"fit-kde-gaussian-1.0",
]
parser.add_argument("--singles_method", type=str, choices=singles_methods, required=True)
parser.add_argument("--rel_fraction_singles_per_class", type=float, required=True)
|
def get_name(
subject: str,
seed: int,
parallel_model_type: str,
clf_name: str,
doubles_method: str,
fraction_doubles_per_class: float,
singles_method: str,
rel_fraction_singles_per_class: float,
include_doubles_in_train: bool,
feature_combine_type: str,
):
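    # Compose a "__"-joined run identifier from the experiment settings (one key=value segment per hyperparameter).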
return "__".join(
[
f"subj={subject}",
f"seed={seed}",
f"par={parallel_model_type}",
f"clf={clf_name}",
f"doubles={doubles_method}",
f"frac_doubles={fraction_doubles_per_class}",
f"singles={singles_method}",
f"frac_singles={rel_fraction_singles_per_class}",
f"incl_doubles={include_doubles_in_train}",
f"feat_type={feature_combine_type}",
]
)
def plot_confusion_matrix(data: np.ndarray):
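    # Render a confusion matrix (values expected in [0, 1]) as an annotated Plotly heatmap, with both axes ordered and labeled by canonical_coords().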
def make_text(cm):
text = []
for v in cm.flatten():
text.append(f"{round(v, 2)}")
return np.array(text).reshape(cm.shape)
coords, coords_str = canonical_coords()
text = make_text(data)
fig = go.Figure()
fig.update_layout(
# margin=margin,
xaxis=dict(
title="Predicted",
tickangle=-45,
tickmode="array",
ticktext=coords_str,
tickvals=list(range(len(coords_str))),
constrain="domain",
),
yaxis=dict(
title="Actual",
tickmode="array",
ticktext=coords_str,
tickvals=list(range(len(coords_str))),
autorange="reversed",
scaleanchor="x",
scaleratio=1,
constrain="domain",
),
)
fig.add_trace(
go.Heatmap(z=data, text=text, texttemplate="%{text}", zmin=0, zmax=1, colorscale="Blues", showscale=False)
)
return fig
def subset_doubles_uniform(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, take n_per_class items uniformly at random"""
res_x, res_y_dir, res_y_mod = [], [], []
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
subset_idx = np.random.choice(idx, size=n_per_class, replace=False)
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_doubles_near_mean(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, take n_per_class items closest to the mean of these synthetic items"""
# Find class means
class_means = {}
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_means[(d, m)] = np.mean(features_aug[idx], axis=0)
# Subset each class by taking items closest to mean
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
class_mean = class_means[(d, m)]
idx = np.where((labels_2d == (d, m)).all(-1))[0]
dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1)
k_smallest_idx = np.argpartition(dists, n_per_class)[:n_per_class]
subset_idx = idx[k_smallest_idx]
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_doubles_spaced_quantiles(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, rank items by their distance to the class mean,
and take items with ranks 1, K+1, 2K+1.
The spacing K will be approx (class_size / n_per_class)
"""
# Find class means
class_means = {}
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_means[(d, m)] = np.mean(features_aug[idx], axis=0)
# Subset each class by taking items closest to mean
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
class_mean = class_means[(d, m)]
idx = np.where((labels_2d == (d, m)).all(-1))[0]
dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_dir_mod(
method: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray
):
# Should have 1-hot vector labels
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
# Figure out how many items we have per class
# Then use fraction_doubles_per_class to figure out how many doubles we want
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes)))
n_per_class = min(n_per_class, np.min(class_sizes))
logger.info(f"Initial class sizes: {class_sizes}, n_per_class: {n_per_class}")
    # For each class, keep n_per_class items selected by the requested subsetting method
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_mean = np.mean(features[idx], axis=0)
if method == "subsetInput_uniform":
subset_idx = np.random.choice(idx, n_per_class, replace=False)
elif method == "subsetInput_near_mean":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
subset_idx = idx[ranked_distances[:n_per_class]]
elif method == "subsetInput_spaced_quantiles":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features[subset_idx])
res_y_dir.append(dir_labels[subset_idx])
res_y_mod.append(mod_labels[subset_idx])
res_x = np.concatenate(res_x)
res_y_dir = np.concatenate(res_y_dir)
res_y_mod = np.concatenate(res_y_mod)
labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Class sizes after subset: {class_sizes}")
return res_x, res_y_dir, res_y_mod
def get_augmented_doubles(
method: str,
feature_combine_type: str,
fraction_doubles_per_class: float,
features: np.ndarray,
dir_labels: np.ndarray,
mod_labels: np.ndarray,
):
if feature_combine_type == "avg":
aug = AvgPairs(-1)
elif feature_combine_type == "max":
aug = ElementwiseMaxPairs(-1)
else:
raise ValueError(f"Unknown feature_combine_type: {feature_combine_type}")
if method == "none":
logger.info("No synthetic doubles")
# We create nothing and return early
features_aug = np.empty((0, *features.shape[1:]))
dir_labels_aug = np.empty((0, *dir_labels.shape[1:]))
mod_labels_aug = np.empty((0, *mod_labels.shape[1:]))
return features_aug, dir_labels_aug, mod_labels_aug
if method.startswith("subsetInput"):
# NOTE - here, n_per_class means how many items in each INPUT class
# Do the subsetting before making combinations
logger.info("Subset before creating doubles")
features_subset, dir_labels_subset, mod_labels_subset = subset_dir_mod(
method, fraction_doubles_per_class, features, dir_labels, mod_labels
)
features_aug, dir_labels_aug, mod_labels_aug = aug(features_subset, dir_labels_subset, mod_labels_subset)
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Final synthetic double class sizes: {class_sizes}")
return features_aug, dir_labels_aug, mod_labels_aug
# Other methods create all combinations and THEN subset
# First, create all augmented items
logger.info("Subset after creating doubles")
features_aug, dir_labels_aug, mod_labels_aug = aug(features, dir_labels, mod_labels)
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Initial synthetic double class sizes: {class_sizes}")
# check these are all doubles
items_with_dir = dir_labels_aug.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels_aug.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == len(features_aug)
# Figure out how many items we want per class
n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes)))
n_per_class = min(n_per_class, np.min(class_sizes))
# Then, subset as requested
if method == "all":
pass
elif method == "subset_uniform":
features_aug, dir_labels_aug, mod_labels_aug = subset_doubles_uniform(
n_per_class, features_aug, dir_labels_aug, mod_labels_aug
)
elif method == "subset_near_mean":
features_aug, dir_labels_aug, mod_labels_aug = subset_doubles_near_mean(
n_per_class, features_aug, dir_labels_aug, mod_labels_aug
)
elif method == "subset_spaced_quantiles":
features_aug, dir_labels_aug, mod_labels_aug = subset_doubles_spaced_quantiles(
n_per_class, features_aug, dir_labels_aug, mod_labels_aug
)
else:
raise ValueError(f"Unknown augmentation method: {method}")
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Final synthetic double class sizes: {class_sizes}")
return features_aug, dir_labels_aug, mod_labels_aug
def get_noise_simple(x, relative_std):
"""Add noise to x, where the noise standard deviation is relative_std * x.std()"""
return np.random.randn(*x.shape) * relative_std * x.std(0)
def balanced_sample_singles(features, dir_labels, mod_labels, n_per_class):
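    # Sample exactly n_per_class items from every single-gesture class, repeating the full class as needed and filling the remainder with a random subset.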
# Should have 1-hot vector labels
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
n_needed = n_per_class
selected_idx = []
while True:
if n_needed >= len(idx):
# Take all items in this class 1 more time
selected_idx.append(idx)
n_needed -= len(idx)
else:
# Take the remaining items randomly
selected_idx.append(np.random.choice(idx, n_needed, replace=False))
break
selected_idx = np.concatenate(selected_idx)
res_x.append(features[selected_idx])
res_y_dir.append(dir_labels[selected_idx])
res_y_mod.append(mod_labels[selected_idx])
return np.concatenate(res_x), np.concatenate(res_y_dir), np.concatenate(res_y_mod)
def sample_singles_gmm(features, dir_labels, mod_labels, n_per_class, n_components):
"""Fit a GMM to each class, then sample as requested"""
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
    # For each class, fit a Gaussian mixture and sample the requested number of points
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
# NOTE - d and m are now integer values. We need to convert them to 1-hot vectors for the output
d_onehot = np.zeros(dir_labels.shape[1])
d_onehot[d] = 1
m_onehot = np.zeros(mod_labels.shape[1])
m_onehot[m] = 1
idx = np.where((labels_2d == (d, m)).all(-1))[0]
gmm = GaussianMixture(n_components=n_components)
gmm.fit(features[idx])
res_x.append(gmm.sample(n_per_class)[0])
res_y_dir.append(np.tile(d_onehot, (n_per_class, 1)))
res_y_mod.append(np.tile(m_onehot, (n_per_class, 1)))
return np.concatenate(res_x), np.concatenate(res_y_dir), np.concatenate(res_y_mod)
def sample_singles_kde(features, dir_labels, mod_labels, n_per_class, bandwidth):
"""Fit a GMM to each class, then sample as requested"""
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
    # For each class, fit a kernel density estimate and sample the requested number of points
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
# NOTE - d and m are now integer values. We need to convert them to 1-hot vectors for the output
d_onehot = np.zeros(dir_labels.shape[1])
d_onehot[d] = 1
m_onehot = np.zeros(mod_labels.shape[1])
m_onehot[m] = 1
idx = np.where((labels_2d == (d, m)).all(-1))[0]
kde = KernelDensity(bandwidth=bandwidth)
kde.fit(features[idx])
res_x.append(kde.sample(n_per_class))
res_y_dir.append(np.tile(d_onehot, (n_per_class, 1)))
res_y_mod.append(np.tile(m_onehot, (n_per_class, 1)))
return np.concatenate(res_x), np.concatenate(res_y_dir), np.concatenate(res_y_mod)
def get_augmented_singles(
method: str, n_per_class: int, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray
):
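    # Generate synthetic single-gesture samples: jitter a balanced subset with Gaussian noise, or sample from a per-class GMM/KDE fit.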
if method == "none":
logger.info("No augmented singles")
# Return empties so we can just concatenate and not worry about it
features_aug = np.empty((0, *features.shape[1:]))
dir_labels_aug = np.empty((0, *dir_labels.shape[1:]))
mod_labels_aug = np.empty((0, *mod_labels.shape[1:]))
return features_aug, dir_labels_aug, mod_labels_aug
logger.info(f"Augmenting singles with method {method}")
if method.startswith("add-gaussian"):
# First, choose a subset of items according to n_per_class
features, dir_labels_aug, mod_labels_aug = balanced_sample_singles(
features, dir_labels, mod_labels, n_per_class
)
if method == "add-gaussian-0.05":
factor = 0.05
elif method == "add-gaussian-0.1":
factor = 0.1
elif method == "add-gaussian-0.2":
factor = 0.2
elif method == "add-gaussian-0.3":
factor = 0.3
elif method == "add-gaussian-0.4":
factor = 0.4
elif method == "add-gaussian-0.5":
factor = 0.5
elif method == "add-gaussian-0.6":
factor = 0.6
else:
raise ValueError(f"Unknown gaussian factor: {method}")
features_aug = features + get_noise_simple(features, factor)
elif method.startswith("fit-gmm"):
if method == "fit-gmm-1":
nc = 1
elif method == "fit-gmm-3":
nc = 3
elif method == "fit-gmm-5":
nc = 5
elif method == "fit-gmm-10":
nc = 10
features_aug, dir_labels_aug, mod_labels_aug = sample_singles_gmm(
features, dir_labels, mod_labels, n_per_class, n_components=nc
)
elif method.startswith("fit-kde"):
if method == "fit-kde-gaussian-scott":
bandwidth = "scott"
if method == "fit-kde-gaussian-silverman":
bandwidth = "silverman"
if method == "fit-kde-gaussian-0.01":
bandwidth = 0.01
if method == "fit-kde-gaussian-0.1":
bandwidth = 0.1
if method == "fit-kde-gaussian-1.0":
bandwidth = 1.0
if method == "fit-kde-gaussian-10.0":
bandwidth = 10.0
features_aug, dir_labels_aug, mod_labels_aug = sample_singles_kde(
features, dir_labels, mod_labels, n_per_class, bandwidth=bandwidth
)
else:
raise NotImplementedError()
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Augmented singles class sizes: {class_sizes}")
return features_aug, dir_labels_aug, mod_labels_aug
def get_clf(name: str, num_classes: int):
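    # Classifier factory: every option is wrapped in a RobustScaler pipeline; num_classes is accepted for signature symmetry but is not used by the sklearn estimators.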
if name == "mlp":
return make_pipeline(
RobustScaler(), MLPClassifier(hidden_layer_sizes=[100, 100, 100], early_stopping=True, max_iter=200)
)
elif name == "logr":
return make_pipeline(RobustScaler(), LogisticRegression(class_weight="balanced", max_iter=2000, n_jobs=-1))
elif name == "svc":
return make_pipeline(RobustScaler(), SVC(class_weight="balanced", probability=True))
elif name == "rf":
return make_pipeline(RobustScaler(), RandomForestClassifier(class_weight="balanced", n_jobs=-1))
elif name == "knn":
return make_pipeline(RobustScaler(), KNeighborsClassifier())
elif name == "lda":
return make_pipeline(RobustScaler(), LinearDiscriminantAnalysis())
elif name == "gbc":
return make_pipeline(RobustScaler(), GradientBoostingClassifier())
else:
raise ValueError(f"Unknown model name: {name}")
def balance_classes(train_features, train_dir_labels, train_mod_labels):
# Subsample the "Rest" class since it will be overrepresented
assert train_dir_labels.ndim == 2
assert train_mod_labels.ndim == 2
labels_2d = np.stack([train_dir_labels.argmax(-1), train_mod_labels.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Before pruning 'Rest' items, class sizes: {class_sizes}")
rest_idx = np.where((labels_2d == [NO_DIR_IDX, NO_MOD_IDX]).all(-1))[0]
active_idx = np.where((labels_2d != [NO_DIR_IDX, NO_MOD_IDX]).any(-1))[0]
active_counts = np.unique(labels_2d[active_idx], axis=0, return_counts=True)[-1]
avg_n_active = int(np.mean(active_counts))
subset_rest_idx = np.random.choice(rest_idx, avg_n_active, replace=False)
res_x = np.concatenate((train_features[active_idx], train_features[subset_rest_idx]))
res_y_dir = np.concatenate((train_dir_labels[active_idx], train_dir_labels[subset_rest_idx]))
res_y_mod = np.concatenate((train_mod_labels[active_idx], train_mod_labels[subset_rest_idx]))
res_labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1)
res_class_sizes = np.unique(res_labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"After pruning 'Rest' items, class sizes: {res_class_sizes}")
return res_x, res_y_dir, res_y_mod
def remove_double_gestures(train_features, train_dir_labels, train_mod_labels):
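    # Calibration data should contain only single gestures; drop any trial labeled with both a direction and a modifier.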
labels_2d = np.stack([train_dir_labels.argmax(-1), train_mod_labels.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Before removing double gestures, class sizes: {class_sizes}")
items_with_dir = train_dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = train_mod_labels.argmax(-1) != NO_MOD_IDX
# Remove items with both direction and modifier
singles_idx = ~np.logical_and(items_with_dir, items_with_mod)
res_features = train_features[singles_idx]
res_dir_labels = train_dir_labels[singles_idx]
res_mod_labels = train_mod_labels[singles_idx]
res_labels_2d = np.stack([res_dir_labels.argmax(-1), res_mod_labels.argmax(-1)], axis=-1)
res_class_sizes = np.unique(res_labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"After removing double gestures, class sizes: {res_class_sizes}")
return res_features, res_dir_labels, res_mod_labels
@logger.catch(onerror=lambda _: sys.exit(1))
def run_training(
subject: str,
parallel_model_type: str,
clf_name: str,
doubles_method: str,
fraction_doubles_per_class: float,
singles_method: str,
rel_fraction_singles_per_class: float,
include_doubles_in_train: bool,
feature_combine_type: str,
):
# We don't want to modify code in the gest module itself.
# Thus, we'll do augmentation manually here, and tell the model not to do
# any further augmentation.
# Load train data
data_dict = load_data_dict()
try:
data = data_dict[subject]
except KeyError:
raise ValueError(f"Unknown subject: {subject}")
train_features = data["Calibration_features"]
train_dir_labels = data["Calibration_dir_labels"]
train_mod_labels = data["Calibration_mod_labels"]
# First, reduce amount of "Rest" items in train set
train_features, train_dir_labels, train_mod_labels = balance_classes(
train_features, train_dir_labels, train_mod_labels
)
    # Remove any double gestures that occurred due to bad participant behavior
train_features, train_dir_labels, train_mod_labels = remove_double_gestures(
train_features, train_dir_labels, train_mod_labels
)
# NOTE - we use HoldPulse1_NoFeedback and SimultaneousPulse1_NoFeedback for train set in the "upper bound"
# otherwise, these blocks are not used
# Load test data
if include_doubles_in_train:
# We use blocks 1 and 2 of the "NoFeedBack" portion of experiment
# Double check that we're not using augmentation
assert doubles_method == "none"
assert singles_method == "none"
# Add real combos to train set
train_features = np.concatenate(
[
train_features,
data["HoldPulse1_NoFeedBack_features"],
data["SimultaneousPulse1_NoFeedBack_features"],
data["HoldPulse2_NoFeedBack_features"],
data["SimultaneousPulse2_NoFeedBack_features"],
]
)
train_dir_labels = np.concatenate(
[
train_dir_labels,
data["HoldPulse1_NoFeedBack_dir_labels"],
data["SimultaneousPulse1_NoFeedBack_dir_labels"],
data["HoldPulse2_NoFeedBack_dir_labels"],
data["SimultaneousPulse2_NoFeedBack_dir_labels"],
]
)
train_mod_labels = np.concatenate(
[
train_mod_labels,
data["HoldPulse1_NoFeedBack_mod_labels"],
data["SimultaneousPulse1_NoFeedBack_mod_labels"],
data["HoldPulse2_NoFeedBack_mod_labels"],
data["SimultaneousPulse2_NoFeedBack_mod_labels"],
]
)
logger.info(f"Initial train set: {train_features.shape=}, {train_dir_labels.shape=}, {train_mod_labels.shape=}")
# Don't use "Feedback" blocks for this analysis
test_blocks = ["HoldPulse3_NoFeedBack", "SimultaneousPulse3_NoFeedBack"]
test_features = np.concatenate([data[f"{block}_features"] for block in test_blocks])
test_dir_labels = np.concatenate([data[f"{block}_dir_labels"] for block in test_blocks])
test_mod_labels = np.concatenate([data[f"{block}_mod_labels"] for block in test_blocks])
logger.info(f"test set: {test_features.shape=}, {test_dir_labels.shape=}, {test_mod_labels.shape=}")
# Vary strategy for augmented doubles
double_features_aug, double_dir_labels_aug, double_mod_labels_aug = get_augmented_doubles(
doubles_method,
feature_combine_type,
fraction_doubles_per_class,
train_features,
train_dir_labels,
train_mod_labels,
)
# Make augmented singles
# Figure out how many doubles per class. Take avg and then apply rel_fraction_singles_per_class to
# get the number of singles per class
n_singles_per_class = 0
if singles_method != "none":
doubles_labels_2d = np.stack((double_dir_labels_aug.argmax(-1), double_mod_labels_aug.argmax(-1)), axis=-1)
class_sizes = np.unique(doubles_labels_2d, axis=0, return_counts=True)[-1]
n_singles_per_class = int(np.round(np.mean(class_sizes) * rel_fraction_singles_per_class))
single_features_aug, single_dir_labels_aug, single_mod_labels_aug = get_augmented_singles(
singles_method, n_singles_per_class, train_features, train_dir_labels, train_mod_labels
)
# Merge all train data
train_features = np.concatenate([train_features, double_features_aug, single_features_aug])
train_dir_labels = np.concatenate([train_dir_labels, double_dir_labels_aug, single_dir_labels_aug])
train_mod_labels = np.concatenate([train_mod_labels, double_mod_labels_aug, single_mod_labels_aug])
logger.info(f"Augmented train set: {train_features.shape=}, {train_dir_labels.shape=}, {train_mod_labels.shape=}")
# Create model
if parallel_model_type == "ParallelA":
model = ParallelA(
get_clf(clf_name, num_classes=5),
get_clf(clf_name, num_classes=3),
use_augmentation=False,
include_rest_data_for_clf=True,
)
elif parallel_model_type == "ParallelB":
model = ParallelB(
dir_clf=get_clf(clf_name, num_classes=4),
mod_clf=get_clf(clf_name, num_classes=2),
has_dir_clf=get_clf(clf_name, num_classes=2),
has_mod_clf=get_clf(clf_name, num_classes=2),
use_augmentation=False,
# include_rest_data_for_clf=True, # NOTE - always using true, flag is not in model
)
elif parallel_model_type == "SerialControl":
model = get_clf(clf_name, num_classes=15)
else:
raise ValueError(f"Unknown parallel model type: {parallel_model_type}")
# Train
logger.info("Train...")
if parallel_model_type == "SerialControl":
# Convert labels to integer by making 2-digit numbers,
# where the 10s place is the dir label and the 1s place is the mod label
train_labels = train_dir_labels.argmax(-1) * 10 + train_mod_labels.argmax(-1)
model.fit(train_features, train_labels)
else:
model.fit(train_features, train_dir_labels, train_mod_labels)
# Evaluate
logger.info("Evaluate")
if parallel_model_type == "SerialControl":
combined_preds = model.predict(test_features)
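        # Decode the combined 2-digit labels: the 10s place is the direction class,
        # the 1s place is the modifier class (mirrors the encoding used for training)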
dir_preds = combined_preds // 10
mod_preds = combined_preds % 10
else:
dir_preds, mod_preds = model.predict(test_features)
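    # Pair direction and modifier class indices into (N, 2) arrays so predictions
    # and ground truth can be compared jointly by the confusion matrix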
preds_2d = np.stack([dir_preds, mod_preds], axis=-1)
true_labels_2d = np.stack([test_dir_labels.argmax(-1), test_mod_labels.argmax(-1)], axis=-1)
return confusion_matrix(true_labels_2d, preds_2d)
if __name__ == "__main__":
logger.remove()
logger.add(sys.stdout, level="INFO", colorize=True)
parser = argparse.ArgumentParser()
parser.add_argument("--subject", type=str, required=True)
parser.add_argument("--seed", type=int, required=True)
parser.add_argument("--parallel_model_type", choices=["ParallelA", "ParallelB", "SerialControl"], required=True)
clf_names = ["mlp", "rf", "logr"]
parser.add_argument("--clf_name", type=str, choices=clf_names, required=True)
doubles_methods = [
"none",
"subset_uniform",
"subset_near_mean",
"subset_spaced_quantiles",
"subsetInput_uniform",
"subsetInput_near_mean",
"subsetInput_spaced_quantiles",
"all",
]
parser.add_argument("--doubles_method", type=str, choices=doubles_methods, required=True)
parser.add_argument("--fraction_doubles_per_class", type=float, required=True)
singles_methods = [
"none",
"add-gaussian-0.3",
"add-gaussian-0.4",
"add-gaussian-0.5",
"fit-gmm-1",
"fit-gmm-5",
"fit-gmm-10",
"fit-kde-gaussian-silverman",
"fit-kde-gaussian-0.01",
"fit-kde-gaussian-0.1",
"fit-kde-gaussian-1.0",
]
parser.add_argument("--singles_method", type=str, choices=singles_methods, required=True)
parser.add_argument("--rel_fraction_singles_per_class", type=float, required=True) | parser.add_argument("--include_doubles_in_train", type=str2bool, required=True) | 10 | 2023-12-12 16:50:34+00:00 | 12k |
ebb-earl-co/tidal-wave | tidal_wave/album.py | [
{
"identifier": "AudioFormat",
"path": "tidal_wave/media.py",
"snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\""
},
{
"identifier": "AlbumsEndpointResponseJSON",
"path": "tidal_wave/models.py",
"snippet": "class AlbumsEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"This dataclass is the `dataclass-wizard`-generated class that represents\n the JSON response from https://api.tidal.com/v1/albums/<ALBUMID>\"\"\"\n\n id: int = field(repr=False)\n title: str\n duration: int\n number_of_tracks: int\n number_of_volumes: int = field(repr=False)\n release_date: date\n copyright: str = field(repr=False)\n type: str\n version: Optional[str]\n url: str\n cover: str = field(repr=False)\n explicit: bool\n upc: Union[int, str]\n audio_quality: str\n audio_modes: List[str]\n media_metadata: \"MediaMetadata\" = field(repr=False)\n artist: \"Artist\" = field(repr=False)\n artists: List[\"Artist\"]\n\n def __post_init__(self):\n self.cover_url: str = IMAGE_URL % f\"{self.cover.replace('-', '/')}/1280x1280\"\n self.name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )"
},
{
"identifier": "AlbumsItemsResponseJSON",
"path": "tidal_wave/models.py",
"snippet": "class AlbumsItemsResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"This class represents the JSON response from the TIDAL API endpoint\n /albums/<ID>/items. It is a list of TracksEndpointResponseJSON objects,\n with a bit of metadata based on the query parameters (offset and limit;\n i.e. pagination logic).\"\"\"\n\n limit: int = field(repr=None)\n offset: int = field(repr=None)\n total_number_of_items: int\n items: List[\"AlbumsItemsResponseJSONItem\"]"
},
{
"identifier": "AlbumsReviewResponseJSON",
"path": "tidal_wave/models.py",
"snippet": "class AlbumsReviewResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"This class represents the JSON response from the TIDAL API endpoint\n /albums/<ID>/review.\"\"\"\n\n source: str\n last_updated: Annotated[\n datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")\n ]\n text: str = field(repr=None)\n summary: str = field(repr=None)"
},
{
"identifier": "request_albums",
"path": "tidal_wave/requesting.py",
"snippet": "def request_albums(\n session: Session, identifier: int\n) -> Optional[AlbumsEndpointResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"albums\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n subclass=AlbumsEndpointResponseJSON,\n )"
},
{
"identifier": "request_album_items",
"path": "tidal_wave/requesting.py",
"snippet": "def request_album_items(\n session: Session, identifier: int\n) -> Optional[AlbumsItemsResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"albums\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n parameters={\"limit\": 100},\n url_end=\"/items\",\n subclass=AlbumsItemsResponseJSON,\n )"
},
{
"identifier": "request_album_review",
"path": "tidal_wave/requesting.py",
"snippet": "def request_album_review(\n session: Session, identifier: int\n) -> Optional[AlbumsReviewResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"albums\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n url_end=\"/review\",\n subclass=AlbumsReviewResponseJSON,\n )"
},
{
"identifier": "Track",
"path": "tidal_wave/track.py",
"snippet": "class Track:\n track_id: int\n\n def __post_init__(self):\n self._has_lyrics: Optional[bool] = None\n self.tags: dict = {}\n self.album_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(\n session, self.track_id\n )\n\n def get_album(self, session: Session):\n self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(\n session, self.metadata.album.id\n )\n\n def get_credits(self, session: Session):\n self.credits: Optional[TracksCreditsResponseJSON] = request_credits(\n session, self.track_id\n )\n\n def get_lyrics(self, session: Session):\n if self._has_lyrics is None:\n self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(\n session, self.track_id\n )\n if self.lyrics is None:\n self._has_lyrics = False\n else:\n self._has_lyrics = True\n else:\n return self.lyrics\n\n def get_stream(self, session: Session, audio_format: AudioFormat):\n \"\"\"Populates self.stream, self.manifest\"\"\"\n aq: Optional[str] = af_aq.get(audio_format)\n self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(\n session, self.track_id, aq\n )\n\n def set_manifest(self):\n \"\"\"This method sets self.manifest and self.codec\"\"\"\n self.manifest: Manifest = manifester(self.stream)\n # https://dashif.org/codecs/audio/\n if self.manifest.codecs == \"flac\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mqa\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mha1\": # Sony 360 Reality Audio\n self.codec = \"mka\"\n elif self.manifest.codecs == \"mp4a.40.5\": # HE-AAC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.29\": # HE-AAC v2\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.2\": # AAC-LC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"eac3\": # Enhanced AC-3\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.34\": # MP3\n self.codec = \"mp3\"\n\n def set_album_dir(self, out_dir: Path):\n \"\"\"This method sets self.album_dir, based on self.album and\n out_dir. In particular, self.album_dir is a subdirectory of out_dir\n based on the name of the album's artist\"\"\"\n artist_substring: str = self.album.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.album.name} \" f\"[{self.album.id}] [{self.album.release_date.year}]\"\n )\n self.album_dir: Path = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.album.number_of_volumes > 1:\n volume_substring: str = f\"Volume {self.metadata.volume_number}\"\n (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, audio_format: AudioFormat):\n \"\"\"This method sets self.filename. It's based on self.metadata\n as well as audio_format. 
Additionally, if the available codecs in\n self.manifest don't match audio_format, warnings are logged\"\"\"\n _track_part: str = f\"{self.metadata.track_number:02d} - {self.metadata.name}\"\n if audio_format == AudioFormat.low:\n track_substring: str = f\"{_track_part} [L]\"\n elif audio_format == AudioFormat.high:\n track_substring: str = f\"{_track_part} [H]\"\n elif audio_format == AudioFormat.lossless:\n track_substring: str = f\"{_track_part} [CD]\"\n elif audio_format == AudioFormat.mqa:\n track_substring: str = f\"{_track_part} [Q]\"\n elif audio_format == AudioFormat.hi_res:\n track_substring: str = f\"{_track_part} [HiRes]\"\n elif audio_format == AudioFormat.dolby_atmos:\n track_substring: str = f\"{_track_part} [A]\"\n elif audio_format == AudioFormat.sony_360_reality_audio:\n track_substring: str = f\"{_track_part} [360]\"\n else:\n track_substring: str = _track_part\n\n # Check for MQA masquerading as HiRes here\n if audio_format == AudioFormat.hi_res:\n if self.manifest.codecs == \"mqa\":\n logger.warning(\n \"Even though HiRes audio format was requested, this track is only \"\n \"available in MQA format. TIDAL regards this as 'HiRes' even though \"\n \"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. \"\n \"Downloading of track will continue, but it will be marked as MQA.\"\n )\n self.filename: Optional[str] = f\"{_track_part} [Q].{self.codec}\"\n elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):\n logger.warning(\n \"Even though HiRes audio format was requested, and TIDAL responded to \"\n \"that request without error, this track is only available in lossless \"\n \"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will \"\n \"continue, but it will be marked as Lossless ([CD]).\"\n )\n self.filename: Optional[str] = f\"{_track_part} [CD].{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n\n # for use in playlist file ordering\n self.trackname: str = re.match(r\"(?:\\d{2,3} - )(.+?$)\", self.filename).groups()[\n 0\n ]\n\n def set_outfile(self):\n \"\"\"Uses self.album_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n if self.album.number_of_volumes > 1:\n self.outfile: Path = (\n self.album_dir / f\"Volume {self.metadata.volume_number}\" / self.filename\n )\n self.absolute_outfile = str(self.outfile.absolute())\n else:\n self.outfile: Path = self.album_dir / self.filename\n self.absolute_outfile = str(self.outfile.absolute())\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Track {self.absolute_outfile} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes a JPEG file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_image: Path = (\n self.album_dir / f\"{a.name.replace('..', '')}.jpg\"\n )\n if not track_artist_image.exists():\n download_artist_image(session, a, self.album_dir)\n\n def save_artist_bio(self, session: Session):\n \"\"\"This method writes a JSON file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_bio_json: Path = self.album_dir / f\"{a.name}-bio.json\"\n if not 
track_artist_bio_json.exists():\n artist_bio: Optional[ArtistsBioResponseJSON] = request_artist_bio(\n session, a.id\n )\n if artist_bio is not None:\n logger.info(\n f\"Writing artist bio for artist {a.id} to \"\n f\"'{str(track_artist_bio_json.absolute())}\"\n )\n track_artist_bio_json.write_text(artist_bio.to_json())\n\n def save_album_cover(self, session: Session):\n \"\"\"This method saves cover.jpg to self.album_dir; the bytes for cover.jpg\n come from self.album.cover\"\"\"\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if (not self.cover_path.exists()) or (not self.album_cover_saved):\n download_cover_image(\n session=session, cover_uuid=self.album.cover, output_dir=self.album_dir\n )\n else:\n self.album_cover_saved = True\n\n def set_urls(self, session: Session):\n \"\"\"This method sets self.urls based on self.manifest\"\"\"\n if isinstance(self.manifest, JSONDASHManifest):\n self.urls: List[str] = self.manifest.urls\n elif isinstance(self.manifest, XMLDASHManifest):\n self.urls: List[str] = self.manifest.build_urls(session=session)\n self.download_headers: Dict[str, str] = {\"Accept\": self.manifest.mime_type}\n if session.session_id is not None:\n self.download_headers[\"sessionId\"] = session.session_id\n self.download_params = {k: None for k in session.params}\n\n def download_url(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method downloads self.urls[0], for use in situations when\n the manifest returned by TIDAL API contains one URL. It relies on\n byte range headers to incrementally get all content from a URL\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n # Implement HTTP range requests here to mimic official clients\n range_size: int = 1024 * 1024 # 1 MiB\n content_length: int = fetch_content_length(\n session=session, url=self.urls[0]\n )\n if content_length == 0:\n return\n\n range_headers: Iterable[str] = http_request_range_headers(\n content_length=content_length,\n range_size=range_size,\n return_tuple=False,\n )\n for rh in range_headers:\n with session.get(\n self.urls[0], params=self.download_params, headers={\"Range\": rh}\n ) as rr:\n if not rr.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(rr.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFMPEG to re-mux the audio bytes, otherwise\n # mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile,\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(\n f\"Track {self.track_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def download_urls(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method writes the contents from self.urls to a temporary\n directory, then uses FFmpeg to re-mux the data to self.outfile\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=self.download_headers, params=self.download_params\n ) as resp:\n if not resp.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(resp.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFmpeg to re-mux the audio bytes, otherwise\n # 
mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile, acodec=\"copy\", loglevel=\"quiet\"\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(f\"Track {self.track_id} written to '{self.absolute_outfile}'\")\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method GETs the data from self.urls and writes it\n to self.outfile.\"\"\"\n if len(self.urls) == 1:\n outfile: Optional[Path] = self.download_url(\n session=session, out_dir=out_dir\n )\n else:\n outfile: Optional[Path] = self.download_urls(\n session=session, out_dir=out_dir\n )\n\n return outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary,\n write the correct values of various metadata tags to the file.\n E.g. for .flac files, the album's artist is 'ALBUMARTIST',\n but for .m4a files, the album's artist is 'aART'.\"\"\"\n tags = dict()\n if (self.codec == \"flac\") or (self.codec == \"mka\"):\n tag_map = {k: v[\"flac\"] for k, v in TAG_MAPPING.items()}\n elif self.codec == \"m4a\":\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"album\"]] = self.album.title\n tags[tag_map[\"album_artist\"]] = \";\".join((a.name for a in self.album.artists))\n tags[tag_map[\"album_peak_amplitude\"]] = f\"{self.stream.album_peak_amplitude}\"\n tags[tag_map[\"album_replay_gain\"]] = f\"{self.stream.album_replay_gain}\"\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"barcode\"]] = self.album.upc\n tags[tag_map[\"comment\"]] = self.metadata.url\n tags[tag_map[\"copyright\"]] = self.metadata.copyright\n tags[tag_map[\"date\"]] = str(self.album.release_date)\n tags[tag_map[\"isrc\"]] = self.metadata.isrc\n tags[tag_map[\"title\"]] = self.metadata.name\n tags[tag_map[\"track_peak_amplitude\"]] = f\"{self.metadata.peak}\"\n tags[tag_map[\"track_replay_gain\"]] = f\"{self.metadata.replay_gain}\"\n # credits\n for tag in {\"composer\", \"engineer\", \"lyricist\", \"mixer\", \"producer\", \"remixer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.credits, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n # lyrics\n try:\n _lyrics = self.lyrics.subtitles\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[tag_map[\"lyrics\"]] = _lyrics\n\n if self.codec == \"flac\":\n # track and disk\n tags[\"DISCTOTAL\"] = f\"{self.album.number_of_volumes}\"\n tags[\"DISC\"] = f\"{self.metadata.volume_number}\"\n tags[\"TRACKTOTAL\"] = f\"{self.album.number_of_tracks}\"\n tags[\"TRACKNUMBER\"] = f\"{self.metadata.track_number}\"\n # instrument-specific\n # piano\n try:\n piano_credits: List[str] = [\n f\"{pc} (piano)\" for pc in self.credits.piano\n ]\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[\"PERFORMER\"] = piano_credits\n\n elif self.codec == \"m4a\":\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n tags[\"trkn\"] = [(self.metadata.track_number, self.album.number_of_tracks)]\n 
tags[\"disk\"] = [(self.metadata.volume_number, self.album.number_of_volumes)]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n # add album cover\n if self.codec == \"flac\":\n p = mutagen.flac.Picture()\n p.type = mutagen.id3.PictureType.COVER_FRONT\n p.desc = \"Album Cover\"\n p.width = p.height = 1280\n p.mime = \"image/jpeg\"\n p.data = self.cover_path.read_bytes()\n self.mutagen.add_picture(p)\n elif self.codec == \"m4a\":\n self.mutagen[\"covr\"] = [\n MP4Cover(self.cover_path.read_bytes(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n self.mutagen.save()\n # Make sure audio track comes first because of\n # less-sophisticated audio players that only\n # recognize the first stream\n if self.codec == \"flac\":\n with temporary_file(suffix=\".mka\") as tf:\n shutil.move(str(self.outfile.absolute()), tf.name)\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{tf.name}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy\n -metadata:s:v title='Album cover' -metadata:s:v comment='Cover (front)'\n -disposition:v attached_pic \"{self.absolute_outfile}\" \"\"\"\n )\n subprocess.run(cmd)\n elif self.codec == \"m4a\":\n with temporary_file(suffix=\".mka\") as tf:\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{self.absolute_outfile}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy \"{tf.name}\" \"\"\"\n )\n subprocess.run(cmd)\n shutil.copyfile(tf.name, self.absolute_outfile)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[TracksEndpointResponseJSON] = None,\n album: Optional[AlbumsEndpointResponseJSON] = None,\n ) -> Optional[str]:\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n self.outfile = None\n return\n\n if \"DOLBY_ATMOS\" in self.metadata.media_metadata.tags:\n if audio_format != AudioFormat.dolby_atmos:\n logger.warning(\n f\"Track {self.track_id} is only available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if audio_format == AudioFormat.dolby_atmos:\n if \"DOLBY_ATMOS\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Dolby Atmos audio format was requested, but track \"\n f\"{self.track_id} is not available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.sony_360_reality_audio:\n if \"SONY_360RA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Sony 360 Reality Audio audio format was requested, but track \"\n f\"{self.track_id} is not available in Sony 360 Reality Audio \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.mqa:\n if \"MQA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"MQA audio format was requested, but track \"\n f\"{self.track_id} is not available in MQA audio \"\n \"format. 
Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if album is None:\n self.get_album(session)\n else:\n self.album = album\n\n if self.album is None:\n self.outfile = None\n return\n\n self.get_credits(session)\n self.get_stream(session, audio_format)\n if self.stream is None:\n return\n self.set_manifest()\n self.set_album_dir(out_dir)\n self.set_filename(audio_format)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return\n\n try:\n self.get_lyrics(session)\n except Exception:\n pass\n\n self.save_album_cover(session)\n\n try:\n self.save_artist_image(session)\n except Exception:\n pass\n\n try:\n self.save_artist_bio(session)\n except Exception:\n pass\n\n self.set_urls(session)\n\n if self.download(session, out_dir) is None:\n return\n\n self.craft_tags()\n self.set_tags()\n\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dump({k: v}, fp)\n return None\n\n def dumps(self) -> str:\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dumps({k: v})\n return None"
},
{
"identifier": "download_cover_image",
"path": "tidal_wave/utils.py",
"snippet": "def download_cover_image(\n session: Session,\n cover_uuid: str,\n output_dir: Path,\n file_name: str = \"cover.jpg\",\n dimension: Union[int, Tuple[int]] = 1280,\n) -> Optional[Path]:\n \"\"\"Given a UUID that corresponds to a (JPEG) image on Tidal's servers,\n download the image file and write it as 'cover.jpeg' or 'cover.png'\n in the directory `path_to_output_dir`. Returns path to downloaded file\"\"\"\n cover_url_part: str = cover_uuid.replace(\"-\", \"/\")\n if isinstance(dimension, int):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension}x{dimension}\"\n elif isinstance(dimension, tuple):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension[0]}x{dimension[1]}\"\n\n with session.get(url=_url, headers={\"Accept\": \"image/jpeg\"}) as r:\n if not r.ok:\n logger.warning(\n \"Could not retrieve data from Tidal resources/images URL \"\n f\"due to error code: {r.status_code}\"\n )\n logger.debug(r.reason)\n return\n else:\n bytes_to_write = BytesIO(r.content)\n\n if bytes_to_write is not None:\n output_file: Path = output_dir / file_name\n bytes_to_write.seek(0)\n output_file.write_bytes(bytes_to_write.read())\n bytes_to_write.close()\n return output_file"
}
] | from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
from requests import Session
from .media import AudioFormat
from .models import (
AlbumsEndpointResponseJSON,
AlbumsItemsResponseJSON,
AlbumsReviewResponseJSON,
)
from .requesting import request_albums, request_album_items, request_album_review
from .track import Track
from .utils import download_cover_image
import json
import sys | 7,628 |
@dataclass
class Album:
album_id: int
def __post_init__(self):
self.album_dir: Optional[Path] = None
self.album_cover_saved: bool = False
def get_items(self, session: Session):
"""This method populates self.tracks by requesting from
TIDAL albums/items endpoint."""
album_items: AlbumsItemsResponseJSON = request_album_items(
session=session, identifier=self.album_id
)
_items = album_items.items if album_items is not None else ()
self.tracks = tuple(_item.item for _item in _items)
def get_metadata(self, session: Session):
"""This method populates self.metadata by requesting from
TIDAL /albums endpoint"""
self.metadata: AlbumsEndpointResponseJSON = request_albums(
session=session, identifier=self.album_id
)
def get_review(self, session: Session):
"""This method requests the review corresponding to self.album_id
in TIDAL. If it exists, it is written to disk as AlbumReview.json
in self.album_dir"""
self.album_review: Optional[AlbumsReviewResponseJSON] = request_album_review(
session=session, identifier=self.album_id
)
if self.album_review is not None:
(self.album_dir / "AlbumReview.json").write_text(
self.album_review.to_json()
)
def set_dir(self, out_dir: Path):
"""This method populates self.album_dir as a sub-subdirectory of
out_dir: its parent directory is the name of the (main) artist of
the album"""
artist_substring: str = self.metadata.artist.name.replace("..", "")
album_substring: str = (
f"{self.metadata.name.replace('..', '')} "
f"[{self.metadata.id}] [{self.metadata.release_date.year}]"
)
self.album_dir = out_dir / artist_substring / album_substring
self.album_dir.mkdir(parents=True, exist_ok=True)
if self.metadata.number_of_volumes > 1:
for v in range(1, self.metadata.number_of_volumes + 1):
volume_substring: str = f"Volume {v}"
(out_dir / artist_substring / album_substring / volume_substring).mkdir(
parents=True, exist_ok=True
)
def save_cover_image(self, session: Session, out_dir: Path):
"""This method writes cover.jpg in self.album_dir via the
utils.download_cover_image() function. If successful,
then self.album_cover_saved takes the value True"""
if self.album_dir is None:
self.set_dir(out_dir=out_dir)
self.cover_path: Path = self.album_dir / "cover.jpg"
if not self.cover_path.exists():
download_cover_image(
session=session,
cover_uuid=self.metadata.cover,
output_dir=self.album_dir,
)
else:
self.album_cover_saved = True
def get_tracks(
self, session: Session, audio_format: AudioFormat, out_dir: Path
) -> List[Optional[str]]:
"""This method uses self.tracks to call track.Track.get() for each
track in self.tracks. It uses the result of each of these calls to
populate self.track_files"""
track_files: List[str] = [None] * self.metadata.number_of_tracks
for i, t in enumerate(self.tracks): # type(t) is TracksEndpointResponseJSON
|
@dataclass
class Album:
album_id: int
def __post_init__(self):
self.album_dir: Optional[Path] = None
self.album_cover_saved: bool = False
def get_items(self, session: Session):
"""This method populates self.tracks by requesting from
TIDAL albums/items endpoint."""
album_items: AlbumsItemsResponseJSON = request_album_items(
session=session, identifier=self.album_id
)
_items = album_items.items if album_items is not None else ()
self.tracks = tuple(_item.item for _item in _items)
def get_metadata(self, session: Session):
"""This method populates self.metadata by requesting from
TIDAL /albums endpoint"""
self.metadata: AlbumsEndpointResponseJSON = request_albums(
session=session, identifier=self.album_id
)
def get_review(self, session: Session):
"""This method requests the review corresponding to self.album_id
in TIDAL. If it exists, it is written to disk as AlbumReview.json
in self.album_dir"""
self.album_review: Optional[AlbumsReviewResponseJSON] = request_album_review(
session=session, identifier=self.album_id
)
if self.album_review is not None:
(self.album_dir / "AlbumReview.json").write_text(
self.album_review.to_json()
)
def set_dir(self, out_dir: Path):
"""This method populates self.album_dir as a sub-subdirectory of
out_dir: its parent directory is the name of the (main) artist of
the album"""
artist_substring: str = self.metadata.artist.name.replace("..", "")
album_substring: str = (
f"{self.metadata.name.replace('..', '')} "
f"[{self.metadata.id}] [{self.metadata.release_date.year}]"
)
self.album_dir = out_dir / artist_substring / album_substring
self.album_dir.mkdir(parents=True, exist_ok=True)
if self.metadata.number_of_volumes > 1:
for v in range(1, self.metadata.number_of_volumes + 1):
volume_substring: str = f"Volume {v}"
(out_dir / artist_substring / album_substring / volume_substring).mkdir(
parents=True, exist_ok=True
)
def save_cover_image(self, session: Session, out_dir: Path):
"""This method writes cover.jpg in self.album_dir via the
utils.download_cover_image() function. If successful,
then self.album_cover_saved takes the value True"""
if self.album_dir is None:
self.set_dir(out_dir=out_dir)
self.cover_path: Path = self.album_dir / "cover.jpg"
if not self.cover_path.exists():
download_cover_image(
session=session,
cover_uuid=self.metadata.cover,
output_dir=self.album_dir,
)
else:
self.album_cover_saved = True
def get_tracks(
self, session: Session, audio_format: AudioFormat, out_dir: Path
) -> List[Optional[str]]:
"""This method uses self.tracks to call track.Track.get() for each
track in self.tracks. It uses the result of each of these calls to
populate self.track_files"""
track_files: List[str] = [None] * self.metadata.number_of_tracks
for i, t in enumerate(self.tracks): # type(t) is TracksEndpointResponseJSON | track: Track = Track(track_id=t.id) | 7 | 2023-12-12 21:50:25+00:00 | 12k |
Deltares/imod-python | imod/mf6/dis.py | [
{
"identifier": "Package",
"path": "imod/mf6/package.py",
"snippet": "class Package(PackageBase, abc.ABC):\n \"\"\"\n Package is used to share methods for specific packages with no time\n component.\n\n It is not meant to be used directly, only to inherit from, to implement new\n packages.\n\n This class only supports `array input\n <https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=16>`_,\n not the list input which is used in :class:`BoundaryCondition`.\n \"\"\"\n\n _pkg_id = \"\"\n _init_schemata = {}\n _write_schemata = {}\n\n def __init__(self, allargs=None):\n super().__init__(allargs)\n\n def isel(self):\n raise NotImplementedError(\n \"Selection on packages not yet supported. To make a selection on \"\n f\"the xr.Dataset, call {self._pkg_id}.dataset.isel instead.\"\n \"You can create a new package with a selection by calling \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.isel(**selection))\"\n )\n\n def sel(self):\n raise NotImplementedError(\n \"Selection on packages not yet supported. To make a selection on \"\n f\"the xr.Dataset, call {self._pkg_id}.dataset.sel instead. \"\n \"You can create a new package with a selection by calling \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.sel(**selection))\"\n )\n\n def _valid(self, value):\n \"\"\"\n Filters values that are None, False, or a numpy.bool_ False.\n Needs to be this specific, since 0.0 and 0 are valid values, but are\n equal to a boolean False.\n \"\"\"\n # Test singletons\n if value is False or value is None:\n return False\n # Test numpy bool (not singleton)\n elif isinstance(value, np.bool_) and not value:\n return False\n # When dumping to netCDF and reading back, None will have been\n # converted into a NaN. Only check NaN if it's a floating type to avoid\n # TypeErrors.\n elif np.issubdtype(type(value), np.floating) and np.isnan(value):\n return False\n else:\n return True\n\n @staticmethod\n def _number_format(dtype: type):\n if np.issubdtype(dtype, np.integer):\n return \"%i\"\n elif np.issubdtype(dtype, np.floating):\n return \"%.18G\"\n else:\n raise TypeError(\"dtype should be either integer or float\")\n\n @staticmethod\n def _initialize_template(pkg_id):\n loader = jinja2.PackageLoader(\"imod\", \"templates/mf6\")\n env = jinja2.Environment(loader=loader, keep_trailing_newline=True)\n if pkg_id == \"ims\":\n fname = \"sln-ims.j2\"\n elif pkg_id == \"tdis\":\n fname = \"sim-tdis.j2\"\n elif pkg_id in TRANSPORT_PACKAGES:\n fname = f\"gwt-{pkg_id}.j2\"\n elif pkg_id in EXCHANGE_PACKAGES:\n fname = f\"exg-{pkg_id}.j2\"\n else:\n fname = f\"gwf-{pkg_id}.j2\"\n return env.get_template(fname)\n\n def write_blockfile(self, pkgname, globaltimes, write_context: WriteContext):\n directory = write_context.get_formatted_write_directory()\n\n content = self.render(\n directory=directory,\n pkgname=pkgname,\n globaltimes=globaltimes,\n binary=write_context.use_binary,\n )\n filename = write_context.write_directory / f\"{pkgname}.{self._pkg_id}\"\n with open(filename, \"w\") as f:\n f.write(content)\n\n def write_binary_griddata(self, outpath, da, dtype):\n # From the modflow6 source, the header is defined as:\n # integer(I4B) :: kstp --> np.int32 : 1\n # integer(I4B) :: kper --> np.int32 : 2\n # real(DP) :: pertim --> 2 * np.int32 : 4\n # real(DP) :: totim --> 2 * np.int32 : 6\n # character(len=16) :: text --> 4 * np.int32 : 10\n # integer(I4B) :: m1, m2, m3 --> 3 * np.int32 : 13\n # so writing 13 bytes suffices to create a header.\n\n # The following code is commented out due to modflow issue 189\n # 
https://github.com/MODFLOW-USGS/modflow6/issues/189\n # We never write LAYERED data.\n # The (structured) dis array reader results in an error if you try to\n # read a 3D botm array. By storing nlayer * nrow * ncol in the first\n # header entry, the array is read properly.\n\n # haslayer = \"layer\" in da.dims\n # if haslayer:\n # nlayer, nrow, ncol = da.shape\n # else:\n # nrow, ncol = da.shape\n # nlayer = 1\n\n # This is a work around for the abovementioned issue.\n nval = np.product(da.shape)\n header = np.zeros(13, np.int32)\n header[-3] = np.int32(nval) # ncol\n header[-2] = np.int32(1) # nrow\n header[-1] = np.int32(1) # nlayer\n\n with open(outpath, \"w\") as f:\n header.tofile(f)\n da.values.flatten().astype(dtype).tofile(f)\n\n def write_text_griddata(self, outpath, da, dtype):\n with open(outpath, \"w\") as f:\n # Note: reshaping here avoids writing newlines after every number.\n # This dumps all the values in a single row rather than a single\n # column. This is to be preferred, since editors can easily\n # \"reshape\" a long row with \"word wrap\"; they cannot as easily\n # ignore newlines.\n fmt = self._number_format(dtype)\n data = da.values\n if data.ndim > 2:\n np.savetxt(fname=f, X=da.values.reshape((1, -1)), fmt=fmt)\n else:\n np.savetxt(fname=f, X=da.values, fmt=fmt)\n\n def render(self, directory, pkgname, globaltimes, binary):\n d = {}\n if directory is None:\n pkg_directory = pkgname\n else:\n pkg_directory = pathlib.Path(directory) / pkgname\n\n for varname in self.dataset.data_vars:\n key = self._keyword_map.get(varname, varname)\n\n if hasattr(self, \"_grid_data\") and varname in self._grid_data:\n layered, value = self._compose_values(\n self.dataset[varname], pkg_directory, key, binary=binary\n )\n if self._valid(value): # skip False or None\n d[f\"{key}_layered\"], d[key] = layered, value\n else:\n value = self[varname].values[()]\n if self._valid(value): # skip False or None\n d[key] = value\n\n if (hasattr(self, \"_auxiliary_data\")) and (names := get_variable_names(self)):\n d[\"auxiliary\"] = names\n\n return self._template.render(d)\n\n @staticmethod\n def _is_xy_data(obj):\n if isinstance(obj, (xr.DataArray, xr.Dataset)):\n xy = \"x\" in obj.dims and \"y\" in obj.dims\n elif isinstance(obj, (xu.UgridDataArray, xu.UgridDataset)):\n xy = obj.ugrid.grid.face_dimension in obj.dims\n else:\n raise TypeError(\n \"obj should be DataArray or UgridDataArray, \"\n f\"received {type(obj)} instead\"\n )\n return xy\n\n def _compose_values(self, da, directory, name, binary):\n \"\"\"\n Compose values of dictionary.\n\n Ignores times. 
Time dependent boundary conditions use the method from\n BoundaryCondition.\n\n See documentation of wq\n \"\"\"\n layered = False\n values = []\n if self._is_xy_data(da):\n if binary:\n path = (directory / f\"{name}.bin\").as_posix()\n values.append(f\"open/close {path} (binary)\")\n else:\n path = (directory / f\"{name}.dat\").as_posix()\n values.append(f\"open/close {path}\")\n else:\n if \"layer\" in da.dims:\n layered = True\n for layer in da.coords[\"layer\"]:\n values.append(f\"constant {da.sel(layer=layer).values[()]}\")\n else:\n value = da.values[()]\n if self._valid(value): # skip None or False\n values.append(f\"constant {value}\")\n else:\n values = None\n\n return layered, values\n\n def write(\n self,\n pkgname: str,\n globaltimes: Union[List, np.ndarray],\n write_context: WriteContext,\n ):\n directory = write_context.write_directory\n binary = write_context.use_binary\n self.write_blockfile(pkgname, globaltimes, write_context)\n\n if hasattr(self, \"_grid_data\"):\n if self._is_xy_data(self.dataset):\n pkgdirectory = directory / pkgname\n pkgdirectory.mkdir(exist_ok=True, parents=True)\n for varname, dtype in self._grid_data.items():\n key = self._keyword_map.get(varname, varname)\n da = self.dataset[varname]\n if self._is_xy_data(da):\n if binary:\n path = pkgdirectory / f\"{key}.bin\"\n self.write_binary_griddata(path, da, dtype)\n else:\n path = pkgdirectory / f\"{key}.dat\"\n self.write_text_griddata(path, da, dtype)\n\n def _validate(self, schemata: Dict, **kwargs) -> Dict[str, List[ValidationError]]:\n errors = defaultdict(list)\n for variable, var_schemata in schemata.items():\n for schema in var_schemata:\n if (\n variable in self.dataset.keys()\n ): # concentration only added to dataset if specified\n try:\n schema.validate(self.dataset[variable], **kwargs)\n except ValidationError as e:\n errors[variable].append(e)\n return errors\n\n def is_empty(self) -> bool:\n \"\"\"\n Returns True if the package is empty- for example if it contains only no-data values.\n \"\"\"\n\n # Create schemata dict only containing the\n # variables with a AllNoDataSchema and EmptyIndexesSchema (in case of\n # HFB) in the write schemata.\n allnodata_schemata = filter_schemata_dict(\n self._write_schemata, (AllNoDataSchema, EmptyIndexesSchema)\n )\n\n # Find if packages throws ValidationError for AllNoDataSchema or\n # EmptyIndexesSchema.\n allnodata_errors = self._validate(allnodata_schemata)\n return len(allnodata_errors) > 0\n\n def _validate_init_schemata(self, validate: bool):\n \"\"\"\n Run the \"cheap\" schema validations.\n\n The expensive validations are run during writing. Some are only\n available then: e.g. 
idomain to determine active part of domain.\n \"\"\"\n if not validate:\n return\n errors = self._validate(self._init_schemata)\n if len(errors) > 0:\n message = validation_pkg_error_message(errors)\n raise ValidationError(message)\n return\n\n def _get_vars_to_check(self):\n \"\"\"\n Helper function to get all variables which were not set to None\n \"\"\"\n variables = []\n for var in self._metadata_dict.keys():\n if ( # Filter optional variables not filled in\n self.dataset[var].size != 1\n ) or (\n self.dataset[var] != None # noqa: E711\n ):\n variables.append(var)\n\n return variables\n\n def copy(self) -> Any:\n # All state should be contained in the dataset.\n return type(self)(**self.dataset.copy())\n\n @staticmethod\n def _clip_repeat_stress(\n repeat_stress: xr.DataArray,\n time,\n time_start,\n time_end,\n ):\n \"\"\"\n Selection may remove the original data which are repeated.\n These should be re-inserted at the first occuring \"key\".\n Next, remove these keys as they've been \"promoted\" to regular\n timestamps with data.\n \"\"\"\n # First, \"pop\" and filter.\n keys, values = repeat_stress.values.T\n keep = (keys >= time_start) & (keys <= time_end)\n new_keys = keys[keep]\n new_values = values[keep]\n # Now detect which \"value\" entries have gone missing\n insert_values, index = np.unique(new_values, return_index=True)\n insert_keys = new_keys[index]\n # Setup indexer\n indexer = xr.DataArray(\n data=np.arange(time.size),\n coords={\"time\": time},\n dims=(\"time\",),\n ).sel(time=insert_values)\n indexer[\"time\"] = insert_keys\n\n # Update the key-value pairs. Discard keys that have been \"promoted\".\n keep = np.in1d(new_keys, insert_keys, assume_unique=True, invert=True)\n new_keys = new_keys[keep]\n new_values = new_values[keep]\n # Set the values to their new source.\n new_values = insert_keys[np.searchsorted(insert_values, new_values)]\n repeat_stress = xr.DataArray(\n data=np.column_stack((new_keys, new_values)),\n dims=(\"repeat\", \"repeat_items\"),\n )\n return indexer, repeat_stress\n\n @staticmethod\n def _clip_time_indexer(\n time,\n time_start,\n time_end,\n ):\n original = xr.DataArray(\n data=np.arange(time.size),\n coords={\"time\": time},\n dims=(\"time\",),\n )\n indexer = original.sel(time=slice(time_start, time_end))\n\n # The selection might return a 0-sized dimension.\n if indexer.size > 0:\n first_time = indexer[\"time\"].values[0]\n else:\n first_time = None\n\n # If the first time matches exactly, xarray will have done thing we\n # wanted and our work with the time dimension is finished.\n if (time_start is not None) and (time_start != first_time):\n # If the first time is before the original time, we need to\n # backfill; otherwise, we need to ffill the first timestamp.\n if time_start < time[0]:\n method = \"bfill\"\n else:\n method = \"ffill\"\n # Index with a list rather than a scalar to preserve the time\n # dimension.\n first = original.sel(time=[time_start], method=method)\n first[\"time\"] = [time_start]\n indexer = xr.concat([first, indexer], dim=\"time\")\n\n return indexer\n\n def __to_datetime(self, time, use_cftime):\n \"\"\"\n Helper function that converts to datetime, except when None.\n \"\"\"\n if time is None:\n return time\n else:\n return imod.wq.timeutil.to_datetime(time, use_cftime)\n\n def clip_box(\n self,\n time_min=None,\n time_max=None,\n layer_min=None,\n layer_max=None,\n x_min=None,\n x_max=None,\n y_min=None,\n y_max=None,\n state_for_boundary=None,\n ) -> \"Package\":\n \"\"\"\n Clip a package by a bounding box 
(time, layer, y, x).\n\n Slicing intervals may be half-bounded, by providing None:\n\n * To select 500.0 <= x <= 1000.0:\n ``clip_box(x_min=500.0, x_max=1000.0)``.\n * To select x <= 1000.0: ``clip_box(x_min=None, x_max=1000.0)``\n or ``clip_box(x_max=1000.0)``.\n * To select x >= 500.0: ``clip_box(x_min = 500.0, x_max=None.0)``\n or ``clip_box(x_min=1000.0)``.\n\n Parameters\n ----------\n time_min: optional\n time_max: optional\n layer_min: optional, int\n layer_max: optional, int\n x_min: optional, float\n x_max: optional, float\n y_min: optional, float\n y_max: optional, float\n\n Returns\n -------\n clipped: Package\n \"\"\"\n selection = self.dataset\n if \"time\" in selection:\n time = selection[\"time\"].values\n use_cftime = isinstance(time[0], cftime.datetime)\n time_start = self.__to_datetime(time_min, use_cftime)\n time_end = self.__to_datetime(time_max, use_cftime)\n\n indexer = self._clip_time_indexer(\n time=time,\n time_start=time_start,\n time_end=time_end,\n )\n\n if \"repeat_stress\" in selection.data_vars and self._valid(\n selection[\"repeat_stress\"].values[()]\n ):\n repeat_indexer, repeat_stress = self._clip_repeat_stress(\n repeat_stress=selection[\"repeat_stress\"],\n time=time,\n time_start=time_start,\n time_end=time_end,\n )\n selection = selection.drop_vars(\"repeat_stress\")\n selection[\"repeat_stress\"] = repeat_stress\n indexer = repeat_indexer.combine_first(indexer).astype(int)\n\n selection = selection.drop_vars(\"time\").isel(time=indexer)\n\n if \"layer\" in selection.coords:\n layer_slice = slice(layer_min, layer_max)\n # Cannot select if it's not a dimension!\n if \"layer\" not in selection.dims:\n selection = (\n selection.expand_dims(\"layer\")\n .sel(layer=layer_slice)\n .squeeze(\"layer\")\n )\n else:\n selection = selection.sel(layer=layer_slice)\n\n x_slice = slice(x_min, x_max)\n y_slice = slice(y_min, y_max)\n if isinstance(selection, xu.UgridDataset):\n selection = selection.ugrid.sel(x=x_slice, y=y_slice)\n elif (\"x\" in selection.coords) and (\"y\" in selection.coords):\n if selection.indexes[\"y\"].is_monotonic_decreasing:\n y_slice = slice(y_max, y_min)\n selection = selection.sel(x=x_slice, y=y_slice)\n\n cls = type(self)\n new = cls.__new__(cls)\n new.dataset = selection\n return new\n\n def mask(self, domain: GridDataArray) -> Any:\n \"\"\"\n Mask values outside of domain.\n\n Floating values outside of the condition are set to NaN (nodata).\n Integer values outside of the condition are set to 0 (inactive in\n MODFLOW terms).\n\n Parameters\n ----------\n domain: xr.DataArray of integers. Preservers values where domain is larger than 0.\n\n Returns\n -------\n masked: Package\n The package with part masked.\n \"\"\"\n masked = {}\n for var in self.dataset.data_vars.keys():\n da = self.dataset[var]\n if self.skip_masking_dataarray(var):\n masked[var] = da\n continue\n if set(domain.dims).issubset(da.dims):\n if issubclass(da.dtype.type, numbers.Integral):\n masked[var] = da.where(domain > 0, other=0)\n elif issubclass(da.dtype.type, numbers.Real):\n masked[var] = da.where(domain > 0)\n else:\n raise TypeError(\n f\"Expected dtype float or integer. 
Received instead: {da.dtype}\"\n )\n else:\n if da.values[()] is not None:\n if is_scalar(da.values[()]):\n masked[var] = da.values[()] # For scalars, such as options\n else:\n masked[\n var\n ] = da # For example for arrays with only a layer dimension\n else:\n masked[var] = None\n\n return type(self)(**masked)\n\n def is_regridding_supported(self) -> bool:\n \"\"\"\n returns true if package supports regridding.\n \"\"\"\n return hasattr(self, \"_regrid_method\")\n\n def get_regrid_methods(self) -> Optional[Dict[str, Tuple[RegridderType, str]]]:\n if self.is_regridding_supported():\n return self._regrid_method\n return None\n\n def _regrid_array(\n self,\n varname: str,\n regridder_collection: RegridderInstancesCollection,\n regridder_name: str,\n regridder_function: str,\n target_grid: GridDataArray,\n ) -> Optional[GridDataArray]:\n \"\"\"\n Regrids a data_array. The array is specified by its key in the dataset.\n Each data-array can represent:\n -a scalar value, valid for the whole grid\n -an array of a different scalar per layer\n -an array with a value per grid block\n -None\n \"\"\"\n\n # skip regridding for arrays with no valid values (such as \"None\")\n if not self._valid(self.dataset[varname].values[()]):\n return None\n\n # the dataarray might be a scalar. If it is, then it does not need regridding.\n if is_scalar(self.dataset[varname]):\n return self.dataset[varname].values[()]\n\n if isinstance(self.dataset[varname], xr.DataArray):\n coords = self.dataset[varname].coords\n # if it is an xr.DataArray it may be layer-based; then no regridding is needed\n if not (\"x\" in coords and \"y\" in coords):\n return self.dataset[varname]\n\n # if it is an xr.DataArray it needs the dx, dy coordinates for regridding, which are otherwise not mandatory\n if not (\"dx\" in coords and \"dy\" in coords):\n raise ValueError(\n f\"DataArray {varname} does not have both a dx and dy coordinates\"\n )\n\n # obtain an instance of a regridder for the chosen method\n regridder = regridder_collection.get_regridder(\n regridder_name,\n regridder_function,\n )\n\n # store original dtype of data\n original_dtype = self.dataset[varname].dtype\n\n # regrid data array\n regridded_array = regridder.regrid(self.dataset[varname])\n\n # reconvert the result to the same dtype as the original\n return regridded_array.astype(original_dtype)\n\n def regrid_like(\n self,\n target_grid: GridDataArray,\n regridder_types: Dict[str, Tuple[RegridderType, str]] = None,\n ) -> \"Package\":\n \"\"\"\n Creates a package of the same type as this package, based on another discretization.\n It regrids all the arrays in this package to the desired discretization, and leaves the options\n unmodified. At the moment only regridding to a different planar grid is supported, meaning\n ``target_grid`` has different ``\"x\"`` and ``\"y\"`` or different ``cell2d`` coords.\n\n The regridding methods can be specified in the _regrid_method attribute of the package. These are the defaults\n that specify how each array should be regridded. 
These defaults can be overridden using the input\n parameters of this function.\n\n Examples\n --------\n To regrid the npf package with a non-default method for the k-field, call regrid_like with these arguments:\n\n >>> new_npf = npf.regrid_like(like, {\"k\": (imod.RegridderType.OVERLAP, \"mean\")})\n\n\n Parameters\n ----------\n target_grid: xr.DataArray or xu.UgridDataArray\n a grid defined over the same discretization as the one we want to regrid the package to\n regridder_types: dict(str->(regridder type,str))\n dictionary mapping arraynames (str) to a tuple of regrid type (a specialization class of BaseRegridder) and function name (str)\n this dictionary can be used to override the default mapping method.\n\n Returns\n -------\n a package with the same options as this package, and with all the data-arrays regridded to another discretization,\n similar to the one used in input argument \"target_grid\"\n \"\"\"\n if not self.is_regridding_supported():\n raise NotImplementedError(\n f\"Package {type(self).__name__} does not support regridding\"\n )\n\n regridder_collection = RegridderInstancesCollection(\n self.dataset, target_grid=target_grid\n )\n\n regridder_settings = copy.deepcopy(self._regrid_method)\n if regridder_types is not None:\n regridder_settings.update(regridder_types)\n\n new_package_data = get_non_grid_data(self, list(regridder_settings.keys()))\n\n for (\n varname,\n regridder_type_and_function,\n ) in regridder_settings.items():\n regridder_name, regridder_function = regridder_type_and_function\n\n # skip variables that are not in this dataset\n if varname not in self.dataset.keys():\n continue\n\n # regrid the variable\n new_package_data[varname] = self._regrid_array(\n varname,\n regridder_collection,\n regridder_name,\n regridder_function,\n target_grid,\n )\n\n new_package = self.__class__(**new_package_data)\n\n return new_package\n\n def skip_masking_dataarray(self, array_name: str) -> bool:\n if hasattr(self, \"_skip_mask_arrays\"):\n return array_name in self._skip_mask_arrays\n return False\n\n @classmethod\n def is_grid_agnostic_package(cls) -> bool:\n return False\n\n def __repr__(self) -> str:\n typename = type(self).__name__\n return f\"{typename}\\n{self.dataset.__repr__()}\"\n\n def _repr_html_(self) -> str:\n typename = type(self).__name__\n return f\"<div>{typename}</div>{self.dataset._repr_html_()}\""
},
{
"identifier": "RegridderType",
"path": "imod/mf6/regridding_utils.py",
"snippet": "class RegridderType(Enum):\n \"\"\"\n Enumerator referring to regridder types in ``xugrid``.\n These can be used safely in scripts, remaining backwards compatible for\n when it is decided to rename regridders in ``xugrid``. For an explanation\n what each regridder type does, we refer to the `xugrid documentation <https://deltares.github.io/xugrid/examples/regridder_overview.html>`_\n \"\"\"\n\n CENTROIDLOCATOR = xu.CentroidLocatorRegridder\n BARYCENTRIC = xu.BarycentricInterpolator\n OVERLAP = xu.OverlapRegridder\n RELATIVEOVERLAP = xu.RelativeOverlapRegridder"
},
{
"identifier": "DisBottomSchema",
"path": "imod/mf6/validation.py",
"snippet": "class DisBottomSchema(NoDataComparisonSchema):\n \"\"\"\n Custom schema for the bottoms as these require some additional logic,\n because of how Modflow 6 computes cell thicknesses.\n \"\"\"\n\n def validate(self, obj: xr.DataArray, **kwargs):\n other_obj = kwargs[self.other]\n\n active = self.is_other_notnull(other_obj)\n bottom = obj\n\n # Only check for multi-layered models\n if bottom.coords[\"layer\"].size > 1:\n # Check if zero thicknesses occur in active cells. The difference across\n # layers is a \"negative thickness\"\n thickness = bottom.diff(dim=\"layer\") * -1.0\n if (thickness.where(active.isel(layer=slice(1, None))) <= 0.0).any():\n raise ValidationError(\"found thickness <= 0.0\")\n\n # To compute thicknesses properly, Modflow 6 requires bottom data in the\n # layer above the active cell in question.\n overlaying_top_inactive = np.isnan(bottom).shift(layer=1, fill_value=False)\n if (overlaying_top_inactive & active).any():\n raise ValidationError(\"inactive bottom above active cell\")"
},
{
"identifier": "ActiveCellsConnectedSchema",
"path": "imod/schemata.py",
"snippet": "class ActiveCellsConnectedSchema(BaseSchema):\n \"\"\"\n Check if active cells are connected, to avoid isolated islands which can\n cause convergence issues, if they don't have a head boundary condition, but\n do have a specified flux.\n\n Note\n ----\n This schema only works for structured grids.\n \"\"\"\n\n def __init__(\n self,\n is_notnull: Union[Callable, Tuple[str, Any]] = _notnull,\n ):\n if isinstance(is_notnull, tuple):\n op, value = is_notnull\n self.is_notnull = partial_operator(op, value)\n else:\n self.is_notnull = is_notnull\n\n def validate(self, obj: xr.DataArray, **kwargs):\n if isinstance(obj, xu.UgridDataArray):\n # TODO: https://deltares.github.io/xugrid/api/xugrid.UgridDataArrayAccessor.connected_components.html\n raise NotImplementedError(\n f\"Schema {self.__name__} only works for structured grids, received xu.UgridDataArray.\"\n )\n\n active = self.is_notnull(obj)\n\n _, nlabels = scipy.ndimage.label(active)\n if nlabels > 1:\n raise ValidationError(\n f\"{nlabels} disconnected areas detected in model domain\"\n )"
},
{
"identifier": "AllValueSchema",
"path": "imod/schemata.py",
"snippet": "class AllValueSchema(ValueSchema):\n \"\"\"\n Validate whether all values pass a condition.\n\n E.g. if operator is \">\":\n\n assert (values > threshold).all()\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n if isinstance(self.other, str):\n other_obj = kwargs[self.other]\n else:\n other_obj = self.other\n\n if scalar_None(obj) or scalar_None(other_obj):\n return\n\n explicitly_ignored = self.get_explicitly_ignored(kwargs)\n\n ignore = (\n np.isnan(obj) | np.isnan(other_obj) | explicitly_ignored\n ) # ignore nan by setting to True\n\n condition = self.operator(obj, other_obj)\n condition = condition | ignore\n if not condition.all():\n raise ValidationError(\n f\"not all values comply with criterion: {self.operator_str} {self.other}\"\n )"
},
{
"identifier": "AnyValueSchema",
"path": "imod/schemata.py",
"snippet": "class AnyValueSchema(ValueSchema):\n \"\"\"\n Validate whether any value passes a condition.\n\n E.g. if operator is \">\":\n\n assert (values > threshold).any()\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n if isinstance(self.other, str):\n other_obj = kwargs[self.other]\n else:\n other_obj = self.other\n\n if scalar_None(obj) or scalar_None(other_obj):\n return\n\n explicitly_ignored = self.get_explicitly_ignored(kwargs)\n\n ignore = (\n ~np.isnan(obj) | ~np.isnan(other_obj) | explicitly_ignored\n ) # ignore nan by setting to False\n\n condition = self.operator(obj, other_obj)\n condition = condition | ignore\n if not condition.any():\n raise ValidationError(\n f\"not a single value complies with criterion: {self.operator_str} {self.other}\"\n )"
},
{
"identifier": "DimsSchema",
"path": "imod/schemata.py",
"snippet": "class DimsSchema(BaseSchema):\n def __init__(self, *dims: DimsT) -> None:\n self.dims = dims\n\n def _fill_in_face_dim(self, obj: Union[xr.DataArray, xu.UgridDataArray]):\n \"\"\"\n Return dims with a filled in face dim if necessary.\n \"\"\"\n if \"{face_dim}\" in self.dims and isinstance(obj, xu.UgridDataArray):\n return tuple(\n (\n obj.ugrid.grid.face_dimension if i == \"{face_dim}\" else i\n for i in self.dims\n )\n )\n elif \"{edge_dim}\" in self.dims and isinstance(obj, xu.UgridDataArray):\n return tuple(\n (\n obj.ugrid.grid.edge_dimension if i == \"{edge_dim}\" else i\n for i in self.dims\n )\n )\n else:\n return self.dims\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n \"\"\"Validate dimensions\n Parameters\n ----------\n dims : Tuple[Union[str, None]]\n Dimensions of the DataArray. `None` may be used as a wildcard value.\n \"\"\"\n dims = self._fill_in_face_dim(obj)\n # Force to tuple for error message print\n expected = tuple(dims)\n actual = tuple(obj.dims)\n if actual != expected:\n raise ValidationError(f\"dim mismatch: expected {expected}, got {actual}\")"
},
{
"identifier": "DTypeSchema",
"path": "imod/schemata.py",
"snippet": "class DTypeSchema(BaseSchema):\n def __init__(self, dtype: DTypeLike) -> None:\n if dtype in [\n np.floating,\n np.integer,\n np.signedinteger,\n np.unsignedinteger,\n np.generic,\n ]:\n self.dtype = dtype\n else:\n self.dtype = np.dtype(dtype)\n\n def validate(self, obj: xr.DataArray, **kwargs) -> None:\n \"\"\"\n Validate dtype\n\n Parameters\n ----------\n dtype : Any\n Dtype of the DataArray.\n \"\"\"\n if scalar_None(obj):\n return\n\n if not np.issubdtype(obj.dtype, self.dtype):\n raise ValidationError(f\"dtype {obj.dtype} != {self.dtype}\")"
},
{
"identifier": "IdentityNoDataSchema",
"path": "imod/schemata.py",
"snippet": "class IdentityNoDataSchema(NoDataComparisonSchema):\n \"\"\"\n Checks that the NoData values are located at exactly the same locations.\n\n Tests only if if all dimensions of the other object are present in the\n object. So tests if \"stage\" with `{time, layer, y, x}` compared to \"idomain\"\n `{layer, y, x}` but doesn't test if \"k\" with `{layer}` is comperated to\n \"idomain\" `{layer, y, x}`\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n other_obj = kwargs[self.other]\n\n # Only test if object has all dimensions in other object.\n missing_dims = set(other_obj.dims) - set(obj.dims)\n\n if len(missing_dims) == 0:\n valid = self.is_notnull(obj)\n other_valid = self.is_other_notnull(other_obj)\n if (valid ^ other_valid).any():\n raise ValidationError(f\"nodata is not aligned with {self.other}\")"
},
{
"identifier": "IndexesSchema",
"path": "imod/schemata.py",
"snippet": "class IndexesSchema(EmptyIndexesSchema):\n \"\"\"\n Verify indexes, check if no dims with zero size are included and that\n indexes are monotonic. Skips unstructured grid dimensions.\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n # Test if indexes all empty\n super().validate(obj)\n\n dims_to_validate = self.get_dims_to_validate(obj)\n\n for dim in dims_to_validate:\n if dim == \"y\":\n if not obj.indexes[dim].is_monotonic_decreasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically decreasing\"\n )\n\n else:\n if not obj.indexes[dim].is_monotonic_increasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically increasing\"\n )"
}
] | import pathlib
import numpy as np
import imod
from imod.mf6.package import Package
from imod.mf6.regridding_utils import RegridderType
from imod.mf6.validation import DisBottomSchema
from imod.schemata import (
ActiveCellsConnectedSchema,
AllValueSchema,
AnyValueSchema,
DimsSchema,
DTypeSchema,
IdentityNoDataSchema,
IndexesSchema,
) | 9,382 |
class StructuredDiscretization(Package):
"""
    Discretization information for structured grids is specified using the DIS6
    file. Only one discretization input file (DISU6, DISV6 or DIS6) can be
    specified for a model.
https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=35
Parameters
----------
top: array of floats (xr.DataArray)
is the top elevation for each cell in the top model layer.
bottom: array of floats (xr.DataArray)
is the bottom elevation for each cell.
idomain: array of integers (xr.DataArray)
Indicates the existence status of a cell. Horizontal discretization
information will be derived from the x and y coordinates of the
DataArray. If the idomain value for a cell is 0, the cell does not exist
in the simulation. Input and output values will be read and written for
the cell, but internal to the program, the cell is excluded from the
solution. If the idomain value for a cell is 1, the cell exists in the
    simulation. If the idomain value for a cell is -1, the cell does not
exist in the simulation. Furthermore, the first existing cell above will
be connected to the first existing cell below. This type of cell is
referred to as a "vertical pass through" cell.
validate: {True, False}
Flag to indicate whether the package should be validated upon
initialization. This raises a ValidationError if package input is
provided in the wrong manner. Defaults to True.
"""
_pkg_id = "dis"
_init_schemata = {
"top": [
DTypeSchema(np.floating),
DimsSchema("y", "x") | DimsSchema(),
IndexesSchema(),
],
"bottom": [
DTypeSchema(np.floating),
DimsSchema("layer", "y", "x") | DimsSchema("layer"),
IndexesSchema(),
],
"idomain": [
DTypeSchema(np.integer),
DimsSchema("layer", "y", "x"),
IndexesSchema(),
],
}
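    # Write-time validation applies cross-variable schemata, such as the
    # ActiveCellsConnectedSchema and DisBottomSchema imported above.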
_write_schemata = {
"idomain": (
|
class StructuredDiscretization(Package):
"""
    Discretization information for structured grids is specified using the DIS6
    file. Only one discretization input file (DISU6, DISV6 or DIS6) can be
    specified for a model.
https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=35
Parameters
----------
top: array of floats (xr.DataArray)
is the top elevation for each cell in the top model layer.
bottom: array of floats (xr.DataArray)
is the bottom elevation for each cell.
idomain: array of integers (xr.DataArray)
Indicates the existence status of a cell. Horizontal discretization
information will be derived from the x and y coordinates of the
DataArray. If the idomain value for a cell is 0, the cell does not exist
in the simulation. Input and output values will be read and written for
the cell, but internal to the program, the cell is excluded from the
solution. If the idomain value for a cell is 1, the cell exists in the
    simulation. If the idomain value for a cell is -1, the cell does not
exist in the simulation. Furthermore, the first existing cell above will
be connected to the first existing cell below. This type of cell is
referred to as a "vertical pass through" cell.
validate: {True, False}
Flag to indicate whether the package should be validated upon
initialization. This raises a ValidationError if package input is
provided in the wrong manner. Defaults to True.
"""
_pkg_id = "dis"
_init_schemata = {
"top": [
DTypeSchema(np.floating),
DimsSchema("y", "x") | DimsSchema(),
IndexesSchema(),
],
"bottom": [
DTypeSchema(np.floating),
DimsSchema("layer", "y", "x") | DimsSchema("layer"),
IndexesSchema(),
],
"idomain": [
DTypeSchema(np.integer),
DimsSchema("layer", "y", "x"),
IndexesSchema(),
],
}
_write_schemata = {
"idomain": ( | ActiveCellsConnectedSchema(is_notnull=("!=", 0)), | 3 | 2023-12-08 13:57:59+00:00 | 12k |
Dong142857/Live3DPortrait | models/model.py | [
{
"identifier": "upfirdn2d",
"path": "torch_utils/ops/upfirdn2d.py",
"snippet": "def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):\n r\"\"\"Pad, upsample, filter, and downsample a batch of 2D images.\n\n Performs the following sequence of operations for each channel:\n\n 1. Upsample the image by inserting N-1 zeros after each pixel (`up`).\n\n 2. Pad the image with the specified number of zeros on each side (`padding`).\n Negative padding corresponds to cropping the image.\n\n 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it\n so that the footprint of all output pixels lies within the input image.\n\n 4. Downsample the image by keeping every Nth pixel (`down`).\n\n This sequence of operations bears close resemblance to scipy.signal.upfirdn().\n The fused op is considerably more efficient than performing the same calculation\n using standard PyTorch ops. It supports gradients of arbitrary order.\n\n Args:\n x: Float32/float64/float16 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n f: Float32 FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n up: Integer upsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n down: Integer downsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n padding: Padding with respect to the upsampled image. Can be a single number\n or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n flip_filter: False = convolution, True = correlation (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n assert isinstance(x, torch.Tensor)\n assert impl in ['ref', 'cuda']\n if impl == 'cuda' and x.device.type == 'cuda' and _init():\n return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)\n return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)"
},
{
"identifier": "assert_shape",
"path": "torch_utils/misc.py",
"snippet": "def assert_shape(tensor, ref_shape):\n if tensor.ndim != len(ref_shape):\n raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')\n for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):\n if ref_size is None:\n pass\n elif isinstance(ref_size, torch.Tensor):\n with suppress_tracer_warnings(): # as_tensor results are registered as constants\n symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')\n elif isinstance(size, torch.Tensor):\n with suppress_tracer_warnings(): # as_tensor results are registered as constants\n symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')\n elif size != ref_size:\n raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')"
},
{
"identifier": "ImportanceRenderer",
"path": "models/eg3d/volumetric_rendering/renderer.py",
"snippet": "class ImportanceRenderer(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ray_marcher = MipRayMarcher2()\n self.plane_axes = generate_planes()\n\n def forward(self, planes, decoder, ray_origins, ray_directions, rendering_options):\n self.plane_axes = self.plane_axes.to(ray_origins.device)\n\n if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto':\n ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp'])\n is_ray_valid = ray_end > ray_start\n if torch.any(is_ray_valid).item():\n ray_start[~is_ray_valid] = ray_start[is_ray_valid].min()\n ray_end[~is_ray_valid] = ray_start[is_ray_valid].max()\n depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n else:\n # Create stratified depth samples\n depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n\n batch_size, num_rays, samples_per_ray, _ = depths_coarse.shape\n\n # Coarse Pass\n sample_coordinates = (ray_origins.unsqueeze(-2) + depths_coarse * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)\n sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3)\n\n\n out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)\n colors_coarse = out['rgb']\n densities_coarse = out['sigma']\n colors_coarse = colors_coarse.reshape(batch_size, num_rays, samples_per_ray, colors_coarse.shape[-1])\n densities_coarse = densities_coarse.reshape(batch_size, num_rays, samples_per_ray, 1)\n\n # Fine Pass\n N_importance = rendering_options['depth_resolution_importance']\n if N_importance > 0:\n _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n depths_fine = self.sample_importance(depths_coarse, weights, N_importance)\n\n sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, N_importance, -1).reshape(batch_size, -1, 3)\n sample_coordinates = (ray_origins.unsqueeze(-2) + depths_fine * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)\n\n out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)\n colors_fine = out['rgb']\n densities_fine = out['sigma']\n colors_fine = colors_fine.reshape(batch_size, num_rays, N_importance, colors_fine.shape[-1])\n densities_fine = densities_fine.reshape(batch_size, num_rays, N_importance, 1)\n\n all_depths, all_colors, all_densities = self.unify_samples(depths_coarse, colors_coarse, densities_coarse,\n depths_fine, colors_fine, densities_fine)\n\n # Aggregate\n rgb_final, depth_final, weights = self.ray_marcher(all_colors, all_densities, all_depths, rendering_options)\n else:\n rgb_final, depth_final, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n\n return rgb_final, depth_final, weights.sum(2)\n\n def run_model(self, planes, decoder, sample_coordinates, sample_directions, options):\n sampled_features = sample_from_planes(self.plane_axes, planes, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'])\n\n out = decoder(sampled_features, sample_directions)\n if options.get('density_noise', 0) > 0:\n out['sigma'] += torch.randn_like(out['sigma']) * options['density_noise']\n return out\n\n def 
sort_samples(self, all_depths, all_colors, all_densities):\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n return all_depths, all_colors, all_densities\n\n def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2):\n all_depths = torch.cat([depths1, depths2], dim = -2)\n all_colors = torch.cat([colors1, colors2], dim = -2)\n all_densities = torch.cat([densities1, densities2], dim = -2)\n\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n\n return all_depths, all_colors, all_densities\n\n def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False):\n \"\"\"\n Return depths of approximately uniformly spaced samples along rays.\n \"\"\"\n N, M, _ = ray_origins.shape\n if disparity_space_sampling:\n depths_coarse = torch.linspace(0,\n 1,\n depth_resolution,\n device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = 1/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n depths_coarse = 1./(1./ray_start * (1. - depths_coarse) + 1./ray_end * depths_coarse)\n else:\n if type(ray_start) == torch.Tensor:\n depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3)\n depth_delta = (ray_end - ray_start) / (depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None]\n else:\n depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = (ray_end - ray_start)/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n\n return depths_coarse\n\n def sample_importance(self, z_vals, weights, N_importance):\n \"\"\"\n Return depths of importance sampled points along rays. 
See NeRF importance sampling for more.\n \"\"\"\n with torch.no_grad():\n batch_size, num_rays, samples_per_ray, _ = z_vals.shape\n\n z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray)\n weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher\n\n # smooth weights\n weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1)\n weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze()\n weights = weights + 0.01\n\n z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])\n importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1],\n N_importance).detach().reshape(batch_size, num_rays, N_importance, 1)\n return importance_z_vals\n\n def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5):\n \"\"\"\n Sample @N_importance samples from @bins with distribution defined by @weights.\n Inputs:\n bins: (N_rays, N_samples_+1) where N_samples_ is \"the number of coarse samples per ray - 2\"\n weights: (N_rays, N_samples_)\n N_importance: the number of samples to draw from the distribution\n det: deterministic or not\n eps: a small number to prevent division by zero\n Outputs:\n samples: the sampled samples\n \"\"\"\n N_rays, N_samples_ = weights.shape\n weights = weights + eps # prevent division by zero (don't do inplace op!)\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)\n cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function\n cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1)\n # padded to 0~1 inclusive\n\n if det:\n u = torch.linspace(0, 1, N_importance, device=bins.device)\n u = u.expand(N_rays, N_importance)\n else:\n u = torch.rand(N_rays, N_importance, device=bins.device)\n u = u.contiguous()\n\n inds = torch.searchsorted(cdf, u, right=True)\n below = torch.clamp_min(inds-1, 0)\n above = torch.clamp_max(inds, N_samples_)\n\n inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)\n cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)\n bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)\n\n denom = cdf_g[...,1]-cdf_g[...,0]\n denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled\n # anyway, therefore any value for it is fine (set to 1 here)\n\n samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])\n return samples"
},
{
"identifier": "sample_from_planes",
"path": "models/eg3d/volumetric_rendering/renderer.py",
"snippet": "def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):\n assert padding_mode == 'zeros'\n N, n_planes, C, H, W = plane_features.shape\n _, M, _ = coordinates.shape\n plane_features = plane_features.view(N*n_planes, C, H, W)\n\n coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds\n\n projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)\n output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)\n return output_features"
},
{
"identifier": "RaySampler",
"path": "models/eg3d/volumetric_rendering/ray_sampler.py",
"snippet": "class RaySampler(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ray_origins_h, self.ray_directions, self.depths, self.image_coords, self.rendering_options = None, None, None, None, None\n\n\n def forward(self, cam2world_matrix, intrinsics, resolution):\n \"\"\"\n Create batches of rays and return origins and directions.\n\n cam2world_matrix: (N, 4, 4)\n intrinsics: (N, 3, 3)\n resolution: int\n\n ray_origins: (N, M, 3)\n ray_dirs: (N, M, 2)\n \"\"\"\n N, M = cam2world_matrix.shape[0], resolution**2\n cam_locs_world = cam2world_matrix[:, :3, 3]\n fx = intrinsics[:, 0, 0]\n fy = intrinsics[:, 1, 1]\n cx = intrinsics[:, 0, 2]\n cy = intrinsics[:, 1, 2]\n sk = intrinsics[:, 0, 1]\n\n uv = torch.stack(torch.meshgrid(torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), indexing='ij')) * (1./resolution) + (0.5/resolution)\n uv = uv.flip(0).reshape(2, -1).transpose(1, 0)\n uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1)\n\n x_cam = uv[:, :, 0].view(N, -1)\n y_cam = uv[:, :, 1].view(N, -1)\n z_cam = torch.ones((N, M), device=cam2world_matrix.device)\n\n x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y_cam/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam\n y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam\n\n cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1)\n\n world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3]\n\n ray_dirs = world_rel_points - cam_locs_world[:, None, :]\n ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2)\n\n ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1)\n\n return ray_origins, ray_dirs"
},
{
"identifier": "SuperresolutionHybrid8XDC",
"path": "models/eg3d/superresolution.py",
"snippet": "class SuperresolutionHybrid8XDC(torch.nn.Module):\n def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,\n num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE\n **block_kwargs):\n super().__init__()\n assert img_resolution == 512\n\n use_fp16 = sr_num_fp16_res > 0\n self.input_resolution = 128\n self.sr_antialias = sr_antialias\n self.block0 = SynthesisBlock(channels, 256, w_dim=512, resolution=256,\n img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)\n self.block1 = SynthesisBlock(256, 128, w_dim=512, resolution=512,\n img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)\n\n def forward(self, rgb, x, ws, **block_kwargs):\n ws = ws[:, -1:, :].repeat(1, 3, 1) # 提取最后一层的w [B, 1, 512]\n\n if x.shape[-1] != self.input_resolution:\n x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),\n mode='bilinear', align_corners=False, antialias=self.sr_antialias)\n rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),\n mode='bilinear', align_corners=False, antialias=self.sr_antialias)\n\n x, rgb = self.block0(x, rgb, ws, **block_kwargs)\n x, rgb = self.block1(x, rgb, ws, **block_kwargs)\n return rgb"
},
{
"identifier": "FullyConnectedLayer",
"path": "models/eg3d/networks_stylegan2.py",
"snippet": "class FullyConnectedLayer(torch.nn.Module):\n def __init__(self,\n in_features, # Number of input features.\n out_features, # Number of output features.\n bias = True, # Apply additive bias before the activation function?\n activation = 'linear', # Activation function: 'relu', 'lrelu', etc.\n lr_multiplier = 1, # Learning rate multiplier.\n bias_init = 0, # Initial value for the additive bias.\n ):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.activation = activation\n self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)\n self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None\n self.weight_gain = lr_multiplier / np.sqrt(in_features)\n self.bias_gain = lr_multiplier\n\n def forward(self, x):\n w = self.weight.to(x.dtype) * self.weight_gain\n b = self.bias\n if b is not None:\n b = b.to(x.dtype)\n if self.bias_gain != 1:\n b = b * self.bias_gain\n\n if self.activation == 'linear' and b is not None:\n x = torch.addmm(b.unsqueeze(0), x, w.t())\n else:\n x = x.matmul(w.t())\n x = bias_act.bias_act(x, b, act=self.activation)\n return x\n\n def extra_repr(self):\n return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'"
},
{
"identifier": "OSGDecoder",
"path": "models/eg3d/triplane.py",
"snippet": "class OSGDecoder(torch.nn.Module):\n def __init__(self, n_features, options):\n super().__init__()\n self.hidden_dim = 64\n\n self.net = torch.nn.Sequential(\n FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']),\n torch.nn.Softplus(),\n FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=options['decoder_lr_mul'])\n )\n \n def forward(self, sampled_features, ray_directions):\n # Aggregate features\n sampled_features = sampled_features.mean(1)\n x = sampled_features\n\n N, M, C = x.shape\n x = x.view(N*M, C)\n\n x = self.net(x)\n x = x.view(N, M, -1)\n rgb = torch.sigmoid(x[..., 1:])*(1 + 2*0.001) - 0.001 # Uses sigmoid clamping from MipNeRF\n sigma = x[..., 0:1]\n return {'rgb': rgb, 'sigma': sigma}"
},
{
"identifier": "Generator",
"path": "models/eg3d/networks_stylegan2.py",
"snippet": "class Generator(torch.nn.Module):\n def __init__(self,\n z_dim, # Input latent (Z) dimensionality.\n c_dim, # Conditioning label (C) dimensionality.\n w_dim, # Intermediate latent (W) dimensionality.\n img_resolution, # Output resolution.\n img_channels, # Number of output color channels.\n mapping_kwargs = {}, # Arguments for MappingNetwork.\n **synthesis_kwargs, # Arguments for SynthesisNetwork.\n ):\n super().__init__()\n self.z_dim = z_dim\n self.c_dim = c_dim\n self.w_dim = w_dim\n self.img_resolution = img_resolution\n self.img_channels = img_channels\n self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)\n self.num_ws = self.synthesis.num_ws\n self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)\n\n def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):\n ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)\n img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)\n return img"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import sys
from segmentation_models_pytorch.decoders.deeplabv3 import DeepLabV3
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from einops import rearrange, repeat
from torch_utils.ops import upfirdn2d
from torch_utils.misc import assert_shape
from functools import reduce
from typing import Union
from segmentation_models_pytorch.encoders.mix_transformer import OverlapPatchEmbed, Block
from models.eg3d.volumetric_rendering.renderer import ImportanceRenderer, sample_from_planes
from models.eg3d.volumetric_rendering.ray_sampler import RaySampler
from models.eg3d.superresolution import SuperresolutionHybrid8XDC
from models.eg3d.networks_stylegan2 import FullyConnectedLayer
from models.eg3d.triplane import OSGDecoder
from models.eg3d.networks_stylegan2 import Generator as StyleGAN2Backbone | 7,378 | # import segmentation_models_pytorch
'''
implementation of RealTimeRF, including the full model and the LT model
'''
# sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..')))
sys.path.append(".")
sys.path.append("..")
class TriGenerator(nn.Module):
'''
similar to TriplaneGenerator class but lack of renderer
'''
    def __init__(self, # parameter list kept for now, used as a placeholder
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality.
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output resolution.
img_channels, # Number of output color channels.
sr_num_fp16_res = 0,
mapping_kwargs = {}, # Arguments for MappingNetwork.
rendering_kwargs = {},
sr_kwargs = {},
**synthesis_kwargs, # Arguments for SynthesisNetwork.
):
super().__init__()
self.z_dim=z_dim
self.c_dim=c_dim
self.w_dim=w_dim
self.backbone = StyleGAN2Backbone(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
self._last_planes = None
self.rendering_kwargs = rendering_kwargs
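    # mapping() turns (z, c) into w latents; synthesis() below runs only the
    # StyleGAN2 backbone and returns raw tri-plane features (no rendering here).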
def mapping(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
if self.rendering_kwargs['c_gen_conditioning_zero']:
c = torch.zeros_like(c)
return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
def synthesis(self, ws, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
if use_cached_backbone and self._last_planes is not None:
planes = self._last_planes
else:
planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
if cache_backbone:
self._last_planes = planes
return planes
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        # Map (z, c) to w latents and return the generated tri-plane features.
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
return self.synthesis(ws, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
class TriplaneRenderer(nn.Module):
def __init__(self, img_resolution, img_channels, rendering_kwargs={}) -> None:
'''
Triplane Renderer
Generate 2D image from triplanes representation
SuperResolution without stylecode
FullyConnected layer
'''
super(TriplaneRenderer, self).__init__()
self.img_resolution=img_resolution
self.img_channels=img_channels
self.renderer = ImportanceRenderer()
self.ray_sampler = RaySampler()
self.w_dim = 512
        self.const = torch.nn.Parameter(torch.randn([1, 1, self.w_dim])) # constant input
self.decoder = OSGDecoder(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32})
self.superresolution = SuperresolutionHybrid8XDC(32, img_resolution, sr_num_fp16_res=0, sr_antialias=True)
self.rendering_kwargs = rendering_kwargs
self.neural_rendering_resolution = 128 # 64
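    # synthesis() volume-renders the tri-planes at neural_rendering_resolution and
    # then upsamples the 32-channel feature image to img_resolution with the SR module.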
def synthesis(self, planes, c, neural_rendering_resolution=None):
cam2world_matrix = c[:, :16].view(-1, 4, 4)
intrinsics = c[:, 16:25].view(-1, 3, 3)
if neural_rendering_resolution is None:
neural_rendering_resolution = self.neural_rendering_resolution
else:
self.neural_rendering_resolution = neural_rendering_resolution
# Create a batch of rays for volume rendering
ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
N, _, _ = ray_origins.shape
# Reshape output into three 32-channel planes
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
# Perform volume rendering
feature_samples, depth_samples, _ = self.renderer(planes, self.decoder, ray_origins, ray_directions, self.rendering_kwargs) # channels last
# Reshape into 'raw' neural-rendered image
H = W = neural_rendering_resolution
feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
# Run superresolution to get final image
rgb_image = feature_image[:, :3]
# sr_image = self.superresolution(rgb_image, feature_image, ws, noise_mode='none')
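        # A learned constant is broadcast over the batch as the SR conditioning input,
        # so no per-image style code (ws) is needed at this stage.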
const_w_input = self.const.repeat([N, 1, 1])
sr_image = self.superresolution(rgb_image, feature_image, const_w_input, noise_mode='none')
return {'image': sr_image,
'image_raw': rgb_image,
'image_depth': depth_image,
'feature_image': feature_image,
'planes': planes}
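    # sample_density() evaluates the tri-plane features directly at the given 3D
    # coordinates, without casting rays.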
def sample_density(self, planes, coordinates, directions):
'''
        Given tri-planes and camera parameters, generate the image and return it.
'''
| # import segmentation_models_pytorch
'''
implementation of RealTimeRF, including the full model and the LT model
'''
# sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..')))
sys.path.append(".")
sys.path.append("..")
class TriGenerator(nn.Module):
'''
similar to TriplaneGenerator class but lack of renderer
'''
    def __init__(self, # parameter list kept for now, used as a placeholder
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality.
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output resolution.
img_channels, # Number of output color channels.
sr_num_fp16_res = 0,
mapping_kwargs = {}, # Arguments for MappingNetwork.
rendering_kwargs = {},
sr_kwargs = {},
**synthesis_kwargs, # Arguments for SynthesisNetwork.
):
super().__init__()
self.z_dim=z_dim
self.c_dim=c_dim
self.w_dim=w_dim
self.backbone = StyleGAN2Backbone(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
self._last_planes = None
self.rendering_kwargs = rendering_kwargs
def mapping(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
if self.rendering_kwargs['c_gen_conditioning_zero']:
c = torch.zeros_like(c)
return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
def synthesis(self, ws, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
if use_cached_backbone and self._last_planes is not None:
planes = self._last_planes
else:
planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
if cache_backbone:
self._last_planes = planes
return planes
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        # Map (z, c) to w latents and return the generated tri-plane features.
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
return self.synthesis(ws, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
class TriplaneRenderer(nn.Module):
def __init__(self, img_resolution, img_channels, rendering_kwargs={}) -> None:
'''
Triplane Renderer
Generate 2D image from triplanes representation
SuperResolution without stylecode
FullyConnected layer
'''
super(TriplaneRenderer, self).__init__()
self.img_resolution=img_resolution
self.img_channels=img_channels
self.renderer = ImportanceRenderer()
self.ray_sampler = RaySampler()
self.w_dim = 512
        self.const = torch.nn.Parameter(torch.randn([1, 1, self.w_dim])) # constant input
self.decoder = OSGDecoder(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32})
self.superresolution = SuperresolutionHybrid8XDC(32, img_resolution, sr_num_fp16_res=0, sr_antialias=True)
self.rendering_kwargs = rendering_kwargs
self.neural_rendering_resolution = 128 # 64
def synthesis(self, planes, c, neural_rendering_resolution=None):
cam2world_matrix = c[:, :16].view(-1, 4, 4)
intrinsics = c[:, 16:25].view(-1, 3, 3)
if neural_rendering_resolution is None:
neural_rendering_resolution = self.neural_rendering_resolution
else:
self.neural_rendering_resolution = neural_rendering_resolution
# Create a batch of rays for volume rendering
ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
N, _, _ = ray_origins.shape
# Reshape output into three 32-channel planes
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
# Perform volume rendering
feature_samples, depth_samples, _ = self.renderer(planes, self.decoder, ray_origins, ray_directions, self.rendering_kwargs) # channels last
# Reshape into 'raw' neural-rendered image
H = W = neural_rendering_resolution
feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
# Run superresolution to get final image
rgb_image = feature_image[:, :3]
# sr_image = self.superresolution(rgb_image, feature_image, ws, noise_mode='none')
const_w_input = self.const.repeat([N, 1, 1])
sr_image = self.superresolution(rgb_image, feature_image, const_w_input, noise_mode='none')
return {'image': sr_image,
'image_raw': rgb_image,
'image_depth': depth_image,
'feature_image': feature_image,
'planes': planes}
def sample_density(self, planes, coordinates, directions):
'''
        Given tri-planes and camera parameters, generate the image and return it.
''' | sampled_features = sample_from_planes(self.renderer.plane_axes, planes, coordinates, padding_mode='zeros', box_warp=self.rendering_kwargs['box_warp']) | 3 | 2023-12-09 15:18:53+00:00 | 12k |
blaise-tk/RVC_CLI | rvc/infer/infer.py | [
{
"identifier": "load_audio",
"path": "rvc/lib/utils.py",
"snippet": "def load_audio(file, sampling_rate):\n try:\n file = file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n out, _ = (\n ffmpeg.input(file, threads=0)\n .output(\"-\", format=\"f32le\", acodec=\"pcm_f32le\", ac=1, ar=sampling_rate)\n .run(cmd=[\"ffmpeg\", \"-nostdin\"], capture_stdout=True, capture_stderr=True)\n )\n except Exception as error:\n raise RuntimeError(f\"Failed to load audio: {error}\")\n\n return np.frombuffer(out, np.float32).flatten()"
},
{
"identifier": "SynthesizerTrnMs256NSFsid",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs256NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n pitchf: torch.Tensor,\n y: torch.Tensor,\n y_lengths: torch.Tensor,\n ds: Optional[torch.Tensor] = None,\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, 
y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n assert isinstance(rate, torch.Tensor)\n head = int(z_p.shape[2] * (1 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs256NSFsid_nono",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super(SynthesizerTrnMs256NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n 
@torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n 
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid_nono",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n 
@torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "Config",
"path": "rvc/configs/config.py",
"snippet": "class Config:\n def __init__(self):\n self.device = \"cuda:0\"\n self.is_half = True\n self.use_jit = False\n self.n_cpu = 0\n self.gpu_name = None\n self.json_config = self.load_config_json()\n self.gpu_mem = None\n self.instead = \"\"\n self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()\n\n @staticmethod\n def load_config_json() -> dict:\n d = {}\n for config_file in version_config_list:\n with open(f\"rvc/configs/{config_file}\", \"r\") as f:\n d[config_file] = json.load(f)\n return d\n\n @staticmethod\n def has_mps() -> bool:\n if not torch.backends.mps.is_available():\n return False\n try:\n torch.zeros(1).to(torch.device(\"mps\"))\n return True\n except Exception:\n return False\n\n @staticmethod\n def has_xpu() -> bool:\n if hasattr(torch, \"xpu\") and torch.xpu.is_available():\n return True\n else:\n return False\n\n def use_fp32_config(self):\n for config_file in version_config_list:\n self.json_config[config_file][\"train\"][\"fp16_run\"] = False\n with open(f\"rvc/configs/{config_file}\", \"r\") as f:\n strr = f.read().replace(\"true\", \"false\")\n with open(f\"rvc/configs/{config_file}\", \"w\") as f:\n f.write(strr)\n with open(\"rvc/train/preprocess/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(\"rvc/train/preprocess/preprocess.py\", \"w\") as f:\n f.write(strr)\n\n def device_config(self) -> tuple:\n if torch.cuda.is_available():\n if self.has_xpu():\n self.device = self.instead = \"xpu:0\"\n self.is_half = True\n i_device = int(self.device.split(\":\")[-1])\n self.gpu_name = torch.cuda.get_device_name(i_device)\n if (\n (\"16\" in self.gpu_name and \"V100\" not in self.gpu_name.upper())\n or \"P40\" in self.gpu_name.upper()\n or \"P10\" in self.gpu_name.upper()\n or \"1060\" in self.gpu_name\n or \"1070\" in self.gpu_name\n or \"1080\" in self.gpu_name\n ):\n self.is_half = False\n self.use_fp32_config()\n self.gpu_mem = int(\n torch.cuda.get_device_properties(i_device).total_memory\n / 1024\n / 1024\n / 1024\n + 0.4\n )\n if self.gpu_mem <= 4:\n with open(\"rvc/train/preprocess/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(\"rvc/train/preprocess/preprocess.py\", \"w\") as f:\n f.write(strr)\n elif self.has_mps():\n print(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"mps\"\n self.is_half = False\n self.use_fp32_config()\n else:\n print(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"cpu\"\n self.is_half = False\n self.use_fp32_config()\n\n if self.n_cpu == 0:\n self.n_cpu = cpu_count()\n\n if self.is_half:\n x_pad = 3\n x_query = 10\n x_center = 60\n x_max = 65\n else:\n x_pad = 1\n x_query = 6\n x_center = 38\n x_max = 41\n\n if self.gpu_mem is not None and self.gpu_mem <= 4:\n x_pad = 1\n x_query = 5\n x_center = 30\n x_max = 32\n\n return x_pad, x_query, x_center, x_max"
}
] | import os
import sys
import torch
import numpy as np
import soundfile as sf
from vc_infer_pipeline import VC
from rvc.lib.utils import load_audio
from fairseq import checkpoint_utils
from rvc.lib.infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from rvc.configs.config import Config | 7,790 |
config = Config()
torch.manual_seed(114514)
hubert_model = None
def load_hubert():
global hubert_model
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
["hubert_base.pt"],
suffix="",
)
hubert_model = models[0]
hubert_model = hubert_model.to(config.device)
if config.is_half:
hubert_model = hubert_model.half()
else:
hubert_model = hubert_model.float()
hubert_model.eval()
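# vc_single() below performs single-file RVC inference: it loads the input
# audio at 16 kHz, peak-normalizes it, lazily loads HuBERT on first use,
# cleans up the retrieval index path, runs vc.pipeline(), and optionally
# writes the converted audio to `output_path` as WAV.
# Note (assumption): `filter_radius`, forwarded to vc.pipeline(), is not
# defined in this excerpt; upstream RVC usually supplies a small median-filter
# radius (e.g. 3) from the caller or a module-level global.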
def vc_single(
sid=0,
input_audio_path=None,
f0_up_key=None,
f0_file=None,
f0_method=None,
file_index=None,
index_rate=None,
resample_sr=0,
rms_mix_rate=1,
protect=0.33,
hop_length=None,
output_path=None,
):
global tgt_sr, net_g, vc, hubert_model, version
if input_audio_path is None:
return "Please, load an audio!", None
f0_up_key = int(f0_up_key)
try:
audio = load_audio(input_audio_path, 16000)
audio_max = np.abs(audio).max() / 0.95
if audio_max > 1:
audio /= audio_max
if not hubert_model:
load_hubert()
if_f0 = cpt.get("f0", 1)
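        # Normalize the retrieval index path: strip stray quotes/whitespace and
        # map "trained" to "added", so the added_*.index built at training time
        # is the one used for feature retrieval.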
file_index = (
file_index.strip(" ")
.strip('"')
.strip("\n")
.strip('"')
.strip(" ")
.replace("trained", "added")
)
if tgt_sr != resample_sr >= 16000:
tgt_sr = resample_sr
audio_opt = vc.pipeline(
hubert_model,
net_g,
sid,
audio,
input_audio_path,
f0_up_key,
f0_method,
file_index,
index_rate,
if_f0,
filter_radius,
tgt_sr,
resample_sr,
rms_mix_rate,
version,
protect,
hop_length,
f0_file=f0_file,
)
if output_path is not None:
sf.write(output_path, audio_opt, tgt_sr, format="WAV")
return (tgt_sr, audio_opt)
except Exception as error:
print(error)
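# get_vc() below selects and instantiates the synthesizer matching the
# checkpoint's "version" and "f0" fields (SynthesizerTrnMs256NSFsid[_nono]
# for v1; the v2 768-dim classes are imported above). Passing an empty `sid`
# instead tears down the previously loaded model and clears the CUDA cache.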
def get_vc(weight_root, sid):
global n_spk, tgt_sr, net_g, vc, cpt, version
if sid == "" or sid == []:
global hubert_model
if hubert_model is not None:
print("clean_empty_cache")
del net_g, n_spk, vc, hubert_model, tgt_sr # ,cpt
            hubert_model = net_g = n_spk = vc = tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
if_f0 = cpt.get("f0", 1)
version = cpt.get("version", "v1")
if version == "v1":
if if_f0 == 1:
net_g = SynthesizerTrnMs256NSFsid(
*cpt["config"], is_half=config.is_half
)
else:
|
config = Config()
torch.manual_seed(114514)
hubert_model = None
def load_hubert():
global hubert_model
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
["hubert_base.pt"],
suffix="",
)
hubert_model = models[0]
hubert_model = hubert_model.to(config.device)
if config.is_half:
hubert_model = hubert_model.half()
else:
hubert_model = hubert_model.float()
hubert_model.eval()
def vc_single(
sid=0,
input_audio_path=None,
f0_up_key=None,
f0_file=None,
f0_method=None,
file_index=None,
index_rate=None,
resample_sr=0,
rms_mix_rate=1,
protect=0.33,
hop_length=None,
output_path=None,
):
global tgt_sr, net_g, vc, hubert_model, version
if input_audio_path is None:
return "Please, load an audio!", None
f0_up_key = int(f0_up_key)
try:
audio = load_audio(input_audio_path, 16000)
audio_max = np.abs(audio).max() / 0.95
if audio_max > 1:
audio /= audio_max
if not hubert_model:
load_hubert()
if_f0 = cpt.get("f0", 1)
file_index = (
file_index.strip(" ")
.strip('"')
.strip("\n")
.strip('"')
.strip(" ")
.replace("trained", "added")
)
if tgt_sr != resample_sr >= 16000:
tgt_sr = resample_sr
audio_opt = vc.pipeline(
hubert_model,
net_g,
sid,
audio,
input_audio_path,
f0_up_key,
f0_method,
file_index,
index_rate,
if_f0,
filter_radius,
tgt_sr,
resample_sr,
rms_mix_rate,
version,
protect,
hop_length,
f0_file=f0_file,
)
if output_path is not None:
sf.write(output_path, audio_opt, tgt_sr, format="WAV")
return (tgt_sr, audio_opt)
except Exception as error:
print(error)
def get_vc(weight_root, sid):
global n_spk, tgt_sr, net_g, vc, cpt, version
if sid == "" or sid == []:
global hubert_model
if hubert_model is not None:
print("clean_empty_cache")
del net_g, n_spk, vc, hubert_model, tgt_sr # ,cpt
            hubert_model = net_g = n_spk = vc = tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
if_f0 = cpt.get("f0", 1)
version = cpt.get("version", "v1")
if version == "v1":
if if_f0 == 1:
net_g = SynthesizerTrnMs256NSFsid(
*cpt["config"], is_half=config.is_half
)
else: | net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) | 2 | 2023-12-10 21:09:41+00:00 | 12k |
Opt-Mucca/PySCIPOpt-ML | src/pyscipopt_ml/modelling/gradient_boosting/aggregate_tree_model.py | [
{
"identifier": "add_decision_tree_classifier_constr",
"path": "src/pyscipopt_ml/sklearn/decision_tree.py",
"snippet": "def add_decision_tree_classifier_constr(\n scip_model,\n decision_tree_classifier,\n input_vars,\n output_vars=None,\n unique_naming_prefix=\"\",\n epsilon=0.0,\n **kwargs,\n):\n \"\"\"Formulate decision_tree_classifier into a SCIP Model.\n\n The formulation predicts the values of output_vars using input_vars\n according to decision_tree_classifier.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n decision_tree_classifier : :external+sklearn:py:class:`sklearn.tree.DecisionTreeClassifier`\n The decision tree classifier to insert as predictor.\n input_vars : list or np.ndarray\n Decision variables used as input for decision tree in model.\n output_vars : list or np.ndarray, optional\n Decision variables used as output for decision tree in model.\n unique_naming_prefix : str, optional\n A unique naming prefix that is used before all variable and constraint names. This parameter is important if\n the SCIP model is later printed to file and many predictors are added to the same SCIP model.\n epsilon : float, optional\n Small value used to impose strict inequalities for splitting nodes in\n MIP formulations.\n Returns\n -------\n DecisionTreeClassifierConstr\n Object containing information about what was added to scip_model to formulate decision_tree_classifier\n\n Note\n ----\n\n |VariablesDimensionsWarn|\n\n Warning\n -------\n\n Although decision trees with multiple outputs are tested they were never\n used in a non-trivial optimization model. It should be used with care at\n this point.\n \"\"\"\n return DecisionTreeConstr(\n scip_model,\n decision_tree_classifier,\n input_vars,\n output_vars,\n unique_naming_prefix,\n epsilon,\n True,\n **kwargs,\n )"
},
{
"identifier": "add_decision_tree_regressor_constr",
"path": "src/pyscipopt_ml/sklearn/decision_tree.py",
"snippet": "def add_decision_tree_regressor_constr(\n scip_model,\n decision_tree_regressor,\n input_vars,\n output_vars=None,\n unique_naming_prefix=\"\",\n epsilon=0.0,\n **kwargs,\n):\n \"\"\"Formulate decision_tree_regressor into a SCIP Model.\n\n The formulation predicts the values of output_vars using input_vars\n according to decision_tree_regressor.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n decision_tree_regressor : :external+sklearn:py:class:`sklearn.tree.DecisionTreeRegressor`\n The decision tree regressor to insert as predictor.\n input_vars : list or np.ndarray\n Decision variables used as input for decision tree in model.\n output_vars : list or np.ndarray, optional\n Decision variables used as output for decision tree in model.\n unique_naming_prefix : str, optional\n A unique naming prefix that is used before all variable and constraint names. This parameter is important if\n the SCIP model is later printed to file and many predictors are added to the same SCIP model.\n epsilon : float, optional\n Small value used to impose strict inequalities for splitting nodes in\n MIP formulations.\n Returns\n -------\n DecisionTreeRegressorConstr\n Object containing information about what was added to scip_model to formulate decision_tree_regressor\n\n Note\n ----\n\n |VariablesDimensionsWarn|\n \"\"\"\n return DecisionTreeConstr(\n scip_model,\n decision_tree_regressor,\n input_vars,\n output_vars,\n unique_naming_prefix,\n epsilon,\n False,\n **kwargs,\n )"
},
{
"identifier": "AbstractPredictorConstr",
"path": "src/pyscipopt_ml/modelling/base_predictor_constraint.py",
"snippet": "class AbstractPredictorConstr(ABC):\n \"\"\"Base class to store all information of embedded ML model by :py:func`pyscipopt_ml.add_predictor_constr`.\n\n This class is the base class to store everything that is added to\n a SCIP model when a trained predictor is inserted into it. Depending on\n the type of the predictor, a class derived from it will be returned\n by :py:func:`pyscipopt_ml.add_predictor_constr`.\n\n Warning\n -------\n\n Users should usually never construct objects of this class or one of its derived\n classes. They are returned by the :py:func:`pyscipopt_ml.add_predictor_constr` and\n other functions.\n \"\"\"\n\n def __init__(\n self, scip_model, input_vars, output_vars=None, unique_naming_prefix=\"\", **kwargs\n ):\n self.scip_model = scip_model\n self.unique_naming_prefix = unique_naming_prefix\n self._validate(input_vars, output_vars)\n self._created_vars = []\n self._created_cons = []\n self._build_predictor_model(**kwargs)\n\n def _validate(self, input_vars, output_vars=None):\n \"\"\"Validate input and output variables (check shapes, reshape if needed).\"\"\"\n\n # Ensure the correct type of input and output is given\n if type(input_vars) not in [list, np.ndarray]:\n raise ParameterError(\n f\"Input variables are not type list or np.ndarray. They are type {type(input_vars)}.\"\n )\n if output_vars is not None:\n if not isinstance(output_vars, list) and not isinstance(output_vars, np.ndarray):\n raise ParameterError(\n f\"Output variables are not type list or np.ndarray. They are type {type(output_vars)}.\"\n )\n\n # Transform the type list to type np.ndarray\n if isinstance(input_vars, list):\n input_vars = np.array(input_vars, dtype=object)\n if isinstance(output_vars, list):\n output_vars = np.array(output_vars, dtype=object)\n\n # Change the dimension of the input variables if needed. 
(Always want number of data points first)\n if input_vars.ndim == 1:\n input_vars = input_vars.reshape((1, -1))\n if input_vars.ndim >= 3:\n input_vars = input_vars.reshape((input_vars.shape[0], -1))\n\n # In the case of the output being None, create the appropriate output variables here\n if output_vars is None:\n output_vars = self._create_output_vars(input_vars)\n\n # Change the dimensions of the output variables if needed (Always want the number of data points first)\n if output_vars.ndim == 1:\n if input_vars.shape[0] == 1:\n output_vars = output_vars.reshape((1, -1))\n else:\n output_vars = output_vars.reshape((-1, 1))\n\n # Ensure that the variable dimensions match that of the predictor\n if hasattr(self, \"input_size\") and input_vars.shape[-1] != self.input_size:\n raise ParameterError(\n f\"Input variables dimension don't conform with predictor {type(self)} \"\n + f\"Input variable dimensions: {input_vars.shape[-1]} != {self.input_size}\"\n )\n\n if hasattr(self, \"output_size\") and output_vars.shape[-1] != self.output_size:\n raise ParameterError(\n f\"Output variable dimensions don't conform with predictor {type(self)} \"\n + f\"Output variable dimensions: {output_vars.shape[-1]} != {self.output_size}\"\n )\n\n if output_vars.shape[0] != input_vars.shape[0]:\n raise ParameterError(\n \"Non-conforming dimension between input variables and output variables: \"\n + f\"{output_vars.shape[0]} != {input_vars.shape[0]}\"\n )\n\n self._input = input_vars\n self._output = output_vars\n\n def _build_predictor_model(self, **kwargs):\n self._mip_model(**kwargs)\n\n def print_stats(self, file=None):\n \"\"\"Print statistics on model additions stored by this class.\n\n This function prints detailed statistics on the variables\n and constraints that were added to the model.\n\n Arguments\n ---------\n\n file: None, optional\n Text stream to which output should be redirected. 
By default, this is sys.stdout.\n \"\"\"\n\n n_indicator_cons = 0\n n_sos_cons = 0\n n_linear_cons = 0\n\n created_cons = self._created_cons\n created_vars = self._created_vars\n if hasattr(self, \"_estimators\"):\n for estimator in self._estimators:\n created_cons += estimator._created_cons\n created_vars += estimator._created_vars\n if hasattr(self, \"_layers\"):\n for layer in self._layers:\n created_cons += layer._created_cons\n created_vars += layer._created_vars\n for cons_set in created_cons:\n it = np.nditer(cons_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(cons_set[it.multi_index], Constraint):\n cons_type = cons_set[it.multi_index].getConshdlrName()\n if cons_type == \"indicator\":\n n_indicator_cons += 1\n elif cons_type == \"SOS1\":\n n_sos_cons += 1\n elif cons_type == \"linear\":\n n_linear_cons += 1\n else:\n raise TypeError(\n f\"Cons {cons_set[it.multi_index]} is of unknown type {cons_type}\"\n )\n\n n_bin_vars = 0\n n_cont_vars = 0\n\n for var_set in created_vars:\n it = np.nditer(var_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(var_set[it.multi_index], Variable):\n var_type = var_set[it.multi_index].vtype()\n if var_type == \"BINARY\":\n n_bin_vars += 1\n elif var_type == \"CONTINUOUS\":\n n_cont_vars += 1\n else:\n raise TypeError(\n f\"Var {var_set[it.multi_index]} is of unknown type {var_type}\"\n )\n\n print(\n f\"Constraints created:\\n Linear {n_linear_cons}\\n Indicator {n_indicator_cons}\\n \"\n f\"SOS1 {n_sos_cons}\\n\"\n f\"Created (internal) variables:\\n Binary {n_bin_vars}\\n Continuous {n_cont_vars}\\n\"\n f\"Input Shape: {self.input.shape}\\nOutput Shape: {self.output.shape}\",\n file=file,\n )\n\n def _create_output_vars(self, input_vars):\n \"\"\"May be defined in derived class to create the output variables of predictor.\"\"\"\n if (not hasattr(self, \"_output\") or self._output is None) and (\n not hasattr(self, \"output_size\") or self.output_size is None\n ):\n raise AttributeError\n\n if not hasattr(self, \"_output\") or self._output is None:\n if hasattr(self, \"classification\"):\n if self.classification:\n vtype = \"B\"\n else:\n vtype = \"C\"\n else:\n vtype = \"C\"\n output_vars = create_vars(\n self.scip_model,\n (input_vars.shape[0], self.output_size),\n vtype,\n lb=None,\n ub=None,\n name_prefix=\"out\",\n )\n return output_vars\n else:\n return self._output\n\n @property\n def _has_solution(self):\n \"\"\"Returns true if we have a solution.\"\"\"\n if self.scip_model.getNSols() > 0:\n return True\n return False\n\n @abstractmethod\n def get_error(self, eps):\n \"\"\"Returns error in SCIP's solution with respect to prediction from input.\n\n Returns\n -------\n error : ndarray of same shape as\n :py:attr:`pyscipopt_ml.modelling.base_predictor_constr.AbstractPredictorConstr.output`\n Assuming that we have a solution for the input and output variables\n `x, y`. Returns the absolute value of the differences between `predictor.predict(x)` and\n `y`. 
Where predictor is the regression / classification model represented by this object.\n\n Raises\n ------\n NoSolution\n If the SCIP model has no solution (either was not optimized or is infeasible).\n \"\"\"\n ...\n\n @abstractmethod\n def _mip_model(self, **kwargs):\n \"\"\"Makes MIP model for the predictor.\"\"\"\n ...\n\n @property\n def input(self):\n \"\"\"Returns the input variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._input\n\n @property\n def output(self):\n \"\"\"Output variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._output\n\n @property\n def input_values(self):\n \"\"\"Returns the values for the input variables if a solution is known.\n\n Returns\n -------\n input_vals : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n input_vals = np.zeros(self.input.shape)\n for i in range(self.input.shape[0]):\n for j in range(self.input.shape[1]):\n input_vals[i][j] = self.scip_model.getVal(self.input[i][j])\n\n return input_vals\n\n @property\n def output_values(self):\n \"\"\"Returns the values for the output variables if a solution is known.\n\n Returns\n -------\n output_value : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n output_vals = np.zeros(self.output.shape)\n for i in range(self.output.shape[0]):\n for j in range(self.output.shape[1]):\n output_vals[i][j] = self.scip_model.getVal(self.output[i][j])\n\n return output_vals\n\n def __str__(self):\n return self._name"
},
{
"identifier": "argmax_bound_formulation",
"path": "src/pyscipopt_ml/modelling/classification/argmax_model.py",
"snippet": "def argmax_bound_formulation(scip_model, _input, output, unique_naming_prefix, one_dim_center=0.5):\n \"\"\"\n Create constraints that represent the output of a gradient boosted tree given that the individual decision\n trees have already been modelled. The constraints ensure binary output of a single class.\n\n The formulation is different depending on the number of classes. In the case of there being two samples:\n\n Let c be the regression input \\reals^{2}, and z the binary output {0, 1}^{2}\n .. math::\n\n \\begin{align*}\n z_{1} : x_{1} >= x_{2}\n z_{2} : x_{2} >= x_{1}\n \\sum z_{i} == 1\n \\end{align*}\n\n for the case of arbitrary classes the formulation below is used:\n\n Let x be the regression input \\reals^{n}, z the binary output\n {0, 1}^{n}, s the slack variables [0, inf]^{n}, and y the maximum over the input \\reals:\n\n .. math::\n\n \\begin{align*}\n x_{i} + s_{i} - y == 0 \\forall i \\in N\n SOS1(z_{i}, s_{i}) \\forall i \\in N\n \\sum z_{i} == 1\n \\end{align*}\n\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n _input : np.ndarray\n The (potentially aggregated) output variables from the regression variant of a predictor, which are now\n input to the argmax formulation.\n output : np.ndarray\n The output variables of the (classification) predictor\n unique_naming_prefix : str, optional\n A unique naming prefix that is used before all variable and constraint names. This parameter is important if\n the SCIP model is later printed to file and many predictors are added to the same SCIP model.\n one_dim_center : float, optional\n The value for which the 1-D argmax is centred around. Normally this is 0.5 for a single binary.\n Returns\n -------\n created_vars : list\n A list containing np.ndarray of PySCIPOpt variables that were created for the argmax formulation\n created_cons : list\n A list containing np.ndarray of PySCIPOpt constraints that we created for the argmax formulation\n \"\"\"\n\n assert (\n _input.shape == output.shape\n ), f\"Input and output dimensions do not match. 
{_input.shape} != {output.shape}\"\n\n # get the in and out dimensions\n n_samples = _input.shape[0]\n outdim = output.shape[-1]\n\n # Separate the formulation into cases\n if outdim == 1:\n name_prefix = unique_naming_prefix + \"argmax\"\n bin_vars = create_vars(scip_model, shape=(n_samples,), vtype=\"B\", name_prefix=name_prefix)\n\n # Create additional constraints\n output_equal_cons = np.zeros((n_samples,), dtype=object)\n output_under_half = np.zeros((n_samples,), dtype=object)\n output_over_half = np.zeros((n_samples,), dtype=object)\n\n # Now populate the constraints\n for i in range(n_samples):\n name = unique_naming_prefix + f\"out_eq_{i}\"\n output_equal_cons[i] = scip_model.addCons(output[i][0] == bin_vars[i], name=name)\n name = unique_naming_prefix + f\"out_ub_{i}\"\n output_under_half[i] = scip_model.addConsIndicator(\n _input[i][0] <= one_dim_center, bin_vars[i], activeone=False, name=name\n )\n name = unique_naming_prefix + f\"out_lb_{i}\"\n output_under_half[i] = scip_model.addConsIndicator(\n -_input[i][0] <= -one_dim_center, bin_vars[i], name=name\n )\n\n return [bin_vars], [output_equal_cons, output_under_half, output_over_half]\n\n elif outdim == 2:\n # Create additional variables\n name_prefix = unique_naming_prefix + \"argmax\"\n max_bin_vars = create_vars(\n scip_model, shape=(n_samples, outdim), vtype=\"B\", name_prefix=name_prefix\n )\n\n # Create additional constraints\n output_equal_cons = np.zeros((n_samples, outdim), dtype=object)\n indicator_cons = np.zeros((n_samples, outdim), dtype=object)\n sum_bin_cons = np.zeros((n_samples,), dtype=object)\n\n # Now populate the constraints\n for i in range(n_samples):\n name = unique_naming_prefix + f\"out_eq_{i}_0\"\n output_equal_cons[i][0] = scip_model.addCons(\n output[i][0] == max_bin_vars[i][0], name=name\n )\n name = unique_naming_prefix + f\"out_eq_{i}_1\"\n output_equal_cons[i][1] = scip_model.addCons(\n output[i][1] == max_bin_vars[i][1], name=name\n )\n name = unique_naming_prefix + f\"indicator_argmax_{i}_0\"\n indicator_cons[i][0] = scip_model.addConsIndicator(\n -_input[i][0] <= -_input[i][1], max_bin_vars[i][0], name=name\n )\n name = unique_naming_prefix + f\"indicator_argmax_{i}_1\"\n indicator_cons[i][1] = scip_model.addConsIndicator(\n -_input[i][1] <= -_input[i][0], max_bin_vars[i][1], name=name\n )\n name = unique_naming_prefix + f\"sum_bin_{i}\"\n sum_bin_cons[i] = scip_model.addCons(\n quicksum(max_bin_vars[i][j] for j in range(outdim)) == 1, name=name\n )\n return [max_bin_vars], [output_equal_cons, indicator_cons, sum_bin_cons]\n else:\n # Create additional variables that are needed for classification\n name_prefix = unique_naming_prefix + \"argmax\"\n max_bin_vars = create_vars(\n scip_model, shape=(n_samples, outdim), vtype=\"B\", name_prefix=name_prefix\n )\n name_prefix = unique_naming_prefix + \"slack_argmax\"\n slack_vars = create_vars(\n scip_model, shape=(n_samples, outdim), vtype=\"C\", lb=0, name_prefix=name_prefix\n )\n name_prefix = unique_naming_prefix + \"max_val\"\n max_val_vars = create_vars(\n scip_model, shape=(n_samples,), vtype=\"C\", lb=None, ub=None, name_prefix=name_prefix\n )\n\n # Create additional constraints that are needed for classification\n output_equal_cons = np.zeros((n_samples, outdim), dtype=object)\n sum_zero_cons = np.zeros((n_samples, outdim), dtype=object)\n sos_slack_bin_cons = np.zeros((n_samples, outdim), dtype=object)\n sum_bin_cons = np.zeros((n_samples,), dtype=object)\n\n for i in range(n_samples):\n for j in range(outdim):\n name = 
unique_naming_prefix + f\"out_eq_{i}_{j}\"\n output_equal_cons[i][j] = scip_model.addCons(\n output[i][j] == max_bin_vars[i][j], name=name\n )\n name = unique_naming_prefix + f\"slack_zero_eq_{i}_{j}\"\n sum_zero_cons[i][j] = scip_model.addCons(\n _input[i][j] + slack_vars[i][j] - max_val_vars[i] == 0, name=name\n )\n name = unique_naming_prefix + f\"sos_slack_bin_{i}_{j}\"\n sos_slack_bin_cons[i][j] = scip_model.addConsSOS1(\n [slack_vars[i][j], max_bin_vars[i][j]], name=name\n )\n\n name = unique_naming_prefix + f\"sum_bin_{i}\"\n sum_bin_cons[i] = scip_model.addCons(\n quicksum(max_bin_vars[i][j] for j in range(outdim)) == 1, name=name\n )\n\n return [max_bin_vars, max_val_vars], [\n output_equal_cons,\n sum_zero_cons,\n sos_slack_bin_cons,\n sum_bin_cons,\n ]"
},
{
"identifier": "leaf_formulation",
"path": "src/pyscipopt_ml/modelling/decision_tree/decision_tree_model.py",
"snippet": "def leaf_formulation(\n scip_model, _input, output, tree, unique_naming_prefix, epsilon, classification=False\n):\n \"\"\"Formulate decision tree using 'leaf' formulation\n\n We have one variable per leaf of the tree and a series of indicator constraints to\n define when that leaf is reached.\n\n The first step of the procedure is to derive input bounds for each leaf of the decision tree. These bounds will\n dictate for which input values the leaf can be reached. For a single sample, let x \\reals^{nI} be the input,\n z {0,1}^{nL} be binary variables representing if a leaf is reached or not, and y \\reals^{nO} be the output.\n\n .. math::\n\n \\begin{align*}\n z_{i} -> x_{j} \\geq leaf_lb[i][j] \\forall i \\in nL, j \\in nI\n z_{i} -> x_{j} \\leq leaf_ub[i][j] \\forall i \\in nL, j \\in nI\n if classification:\n y_{j} == \\sum z_{i} \\forall i \\in nL \\text{, where class j is output of z_i}\n \\sum y_{k} == 1\n else:\n z_{i} -> y_{k} = leaf_value[i][j] \\forall i \\in nL, k \\in nO\n \\sum z_{i} == 1\n \\end{align*}\n\n \"\"\"\n\n # Create names for items we want to access frequently\n n_samples = _input.shape[0]\n n_features = tree[\"n_features\"]\n outdim = output.shape[-1]\n\n # Collect leaf nodes\n leaf_ids = tree[\"children_left\"] <= -1\n n_leafs = sum(leaf_ids)\n name_prefix = unique_naming_prefix + \"leaf\"\n leaf_vars = create_vars(\n scip_model, shape=(n_samples, n_leafs), vtype=\"B\", lb=0, name_prefix=name_prefix\n )\n\n # Calculate bounds for each leaf node\n (node_lb, node_ub) = compute_leafs_bounds(tree, epsilon, scip_model.infinity())\n\n # Create empty constraint objects\n output_class_sum_leaf_cons = np.zeros((n_samples, outdim), dtype=object)\n indicator_output_cons = np.zeros((n_samples, n_leafs, outdim, 2), dtype=object)\n indicator_leaf_lb = np.zeros((n_samples, n_leafs, n_features), dtype=object)\n indicator_leaf_ub = np.zeros((n_samples, n_leafs, n_features), dtype=object)\n\n # Iterate over all leaf nodes (They are the non-zero entries in leaf_ids)\n for i in range(n_samples):\n leafs_per_class = [0 for _ in range(outdim)]\n for j, node in enumerate(leaf_ids.nonzero()[0]):\n fixed_var = False\n # Fix the leaf variable to 0 if the input bounds do not allow the leaf to be reached\n for feature in range(n_features):\n if (\n _input[i][feature].getLbOriginal() > node_ub[feature][node]\n or _input[i][feature].getUbOriginal() < node_lb[feature][node]\n ):\n scip_model.fixVar(leaf_vars[i][j], 0)\n fixed_var = True\n break\n # If the leaf could be reached, then add two sets of indicator constraints.\n # The first will enforce that a leaf node is only selected if the input values result in such a leaf.\n # The second force the appropriate value output by the leaf to be selected\n if not fixed_var:\n for feature in range(n_features):\n name_lb = unique_naming_prefix + f\"indicator_lb_{i}_{j}_{feature}\"\n name_ub = unique_naming_prefix + f\"indicator_ub_{i}_{j}_{feature}\"\n feat_lb = node_lb[feature, node]\n feat_ub = node_ub[feature, node]\n if (\n feat_lb > -scip_model.infinity()\n and _input[i][feature].getLbOriginal() < feat_lb\n ):\n indicator_leaf_lb[i][j][feature] = scip_model.addConsIndicator(\n -_input[i][feature] <= -feat_lb, leaf_vars[i][j], name=name_lb\n )\n if (\n feat_ub < scip_model.infinity()\n and _input[i][feature].getUbOriginal() > feat_ub\n ):\n indicator_leaf_ub[i][j][feature] = scip_model.addConsIndicator(\n _input[i][feature] <= feat_ub, leaf_vars[i][j], name=name_ub\n )\n # Iterate over the final output shape (num_outputs)\n # In the 
case of classification (num_classes), simply force the most frequent class to be selected\n if classification:\n value = int(np.argmax(tree[\"value\"][node][0]))\n if outdim == 1:\n if value == 1:\n leafs_per_class[0] += leaf_vars[i][j]\n else:\n leafs_per_class[value] += leaf_vars[i][j]\n else:\n for k in range(outdim):\n name_ub = unique_naming_prefix + f\"indicator_output_{i}_{j}_{k}_0\"\n name_lb = unique_naming_prefix + f\"indicator_output_{i}_{j}_{k}_1\"\n value = tree[\"value\"][node][k][0]\n indicator_output_cons[i][j][k][0] = scip_model.addConsIndicator(\n output[i][k] <= value, leaf_vars[i][j], name=name_ub\n )\n indicator_output_cons[i][j][k][1] = scip_model.addConsIndicator(\n -output[i][k] <= -value, leaf_vars[i][j], name=name_lb\n )\n # Add constraints that ensure the correct class is selected depending on the leaf\n if classification:\n for j in range(outdim):\n name = f\"class_leaf_{i}_{j}\"\n output_class_sum_leaf_cons[i][j] = scip_model.addCons(\n output[i][j] == leafs_per_class[j], name=name\n )\n\n # Now add the constraints that only one leaf can be selected.\n # In the case of classification there is an additional constraint that only one class can be selected\n leaf_sum_cons = np.zeros(n_samples, dtype=object)\n for i in range(n_samples):\n name = unique_naming_prefix + f\"sum_leafs_{i}\"\n leaf_sum_cons[i] = scip_model.addCons(\n quicksum(leaf_vars[i][j] for j in range(leaf_vars.shape[-1])) == 1, name=name\n )\n\n # Finally set potentially stronger global bounds on the output variables (in the case of regression)\n if not classification:\n max_vals = [np.max(tree[\"value\"][:, j, :]) for j in range(outdim)]\n min_vals = [np.min(tree[\"value\"][:, j, :]) for j in range(outdim)]\n for i in range(n_samples):\n for j in range(outdim):\n if output[i][j].getLbOriginal() < min_vals[j]:\n scip_model.chgVarLb(output[i][j], min_vals[j])\n if output[i][j].getUbOriginal() > max_vals[j]:\n scip_model.chgVarUb(output[i][j], max_vals[j])\n\n # Now return the added constraints and variables\n if classification:\n return [leaf_vars], [\n indicator_leaf_lb,\n indicator_leaf_ub,\n output_class_sum_leaf_cons,\n leaf_sum_cons,\n ]\n else:\n return [leaf_vars], [\n indicator_leaf_lb,\n indicator_leaf_ub,\n indicator_output_cons,\n leaf_sum_cons,\n ]"
},
{
"identifier": "create_vars",
"path": "src/pyscipopt_ml/modelling/var_utils.py",
"snippet": "def create_vars(scip_model, shape, vtype, lb=None, ub=None, name_prefix=\"\"):\n \"\"\"\n Create PySCIPOpt variables in a numpy.ndarray of a given shape.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n shape : tuple\n The shape of the numpy array that will be constructed\n vtype : 'C' | 'B' | 'I'\n Whether the variables will be continuous, binary, or integer\n lb : float or int or None, optional\n The lower bound of the variables\n ub : float or int or None, optional\n The upper bound of the variables\n name_prefix : str, optional\n The naming prefix used for these variables\n\n Returns\n -------\n scip_vars : np.ndarray\n A np.ndarray with shape (shape) that contains uniquely names variables all of which are the specified type\n \"\"\"\n\n scip_vars = np.zeros(shape, dtype=object)\n it = np.nditer(scip_vars, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n idx_list = str(it.multi_index).strip(\")\").strip(\"(\").split(\",\")\n idx_string = \"\"\n for idx in idx_list:\n if idx == \"\":\n continue\n idx_string += f\"_{int(idx)}\"\n name = name_prefix + idx_string\n scip_vars[it.multi_index] = scip_model.addVar(vtype=vtype, lb=lb, ub=ub, name=name)\n return scip_vars"
}
] | import numpy as np
from ...sklearn.decision_tree import (
add_decision_tree_classifier_constr,
add_decision_tree_regressor_constr,
)
from ..base_predictor_constraint import AbstractPredictorConstr
from ..classification.argmax_model import argmax_bound_formulation
from ..decision_tree import leaf_formulation
from ..var_utils import create_vars | 8,852 | The input variables that are passed to each decision tree
output : np.ndarray
The output variables of the predictor
tree_vars : np.ndarray
The PySCIPOpt variables that have been created to represent the output of each decision tree (i.e. estimator)
trees : list
A list of lists containing dictionary information that completely describe each decision tree (i.e. estimator)
constant : np.ndarray
An array of constant shift values that are added to the output values of each decision tree (i.e. estimator)
lr : float or int
The learning rate used while training. For GBDT / RF this scales the output of each tree
n_estimators : int
The number of decision trees (i.e. estimators)
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
epsilon : float
The epsilon that is used for each decision tree model. See
:py:func:`pyscipopt_ml.modelling.decision_tree.leaf_formulation`.
aggr : str, "sum" or "avg"
The aggregation method used in the formulation. Either the estimators are averages or summed.
classification : bool
Whether the aggregated output of each decision tree (i.e. estimator) should be used for classification.
Returns
-------
estimators : list
A list of :py:class`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
created_vars : list
A list containing all created PySCIPOpt vars
created_cons : list
A list containing all created PySCIPOpt cons
"""
# Get the number of samples and output dimension
n_samples = _input.shape[0]
outdim = output.shape[-1]
# Create the individual tree estimators
estimators = create_tree_estimators(
scip_model,
_input,
tree_vars,
trees,
n_estimators,
outdim,
unique_naming_prefix,
epsilon,
False,
**kwargs,
)
# Aggregate the trees over the output dimension
aggregate_tree_output = aggregate_estimator_outputs(tree_vars, lr, constant, aggr=aggr)
# Formulate the appropriate constraints
created_vars, created_cons = create_aggregation_constraints(
scip_model,
aggregate_tree_output,
output,
n_samples,
outdim,
unique_naming_prefix,
classification,
)
return estimators, created_vars, created_cons
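# Illustrative sketch (assumption, inferred from the docstring above): for sample i
# and output j, the aggregated expression is expected to take the form
#   aggr == "sum":  y[i][j] = constant[j] + lr * sum_e tree_out_e(i, j)
#   aggr == "avg":  y[i][j] = constant[j] + lr * mean_e tree_out_e(i, j)
# create_aggregation_constraints() below then ties this expression to `output`.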
def create_aggregation_constraints(
scip_model,
aggregate_tree_output,
output,
n_samples,
outdim,
unique_naming_prefix,
classification,
):
"""
Creates the variables and constraints that link the output of the predictor itself and the aggregation of each
estimator.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
aggregate_tree_output : np.ndarray
The aggregated output variables of each decision tree
output : np.ndarray
The output variables of the predictor
n_samples : int
The number of samples
outdim : int
The number of outputs of each decision tree (i.e. estimator)
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
classification : bool
Whether the aggregated output of each decision tree (i.e. estimator) should be used for classification.
Returns
-------
created_vars : list
A list containing all created PySCIPOpt vars
created_cons : list
A list containing all created PySCIPOpt cons
"""
# Formulate the appropriate constraints
created_cons = []
created_vars = []
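    # Regression: tie each predictor output directly to the aggregated tree
    # expression with one linear equality per (sample, output).
    # Classification: route the aggregated scores through
    # argmax_bound_formulation() so that exactly one class is selected per sample.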
if not classification:
sum_tree_cons = np.zeros((n_samples, outdim), dtype=object)
for i in range(n_samples):
for j in range(outdim):
name = unique_naming_prefix + f"tree_sum_{i}_{j}"
sum_tree_cons[i][j] = scip_model.addCons(
output[i][j] == aggregate_tree_output[i][j], name=name
)
created_cons.append(sum_tree_cons)
else:
| """ Utilities for modelling gradient boosting decision trees and random forest constraints """
def aggregated_estimator_formulation(
scip_model,
_input,
output,
tree_vars,
trees,
constant,
lr,
n_estimators,
unique_naming_prefix,
epsilon,
aggr,
classification,
**kwargs,
):
"""
Creates the model that represents the aggregation of estimators into a single output.
This function is used exclusively for the case where the estimators are decision trees, and the larger
predictor is either a gradient boosting decision tree or random forest.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
_input : np.ndarray
The input variables that are passed to each decision tree
output : np.ndarray
The output variables of the predictor
tree_vars : np.ndarray
The PySCIPOpt variables that have been created to represent the output of each decision tree (i.e. estimator)
trees : list
A list of lists containing dictionary information that completely describe each decision tree (i.e. estimator)
constant : np.ndarray
An array of constant shift values that are added to the output values of each decision tree (i.e. estimator)
lr : float or int
The learning rate used while training. For GBDT / RF this scales the output of each tree
n_estimators : int
The number of decision trees (i.e. estimators)
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
epsilon : float
The epsilon that is used for each decision tree model. See
:py:func:`pyscipopt_ml.modelling.decision_tree.leaf_formulation`.
aggr : str, "sum" or "avg"
The aggregation method used in the formulation. Either the estimators are averages or summed.
classification : bool
Whether the aggregated output of each decision tree (i.e. estimator) should be used for classification.
Returns
-------
estimators : list
A list of :py:class`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
created_vars : list
A list containing all created PySCIPOpt vars
created_cons : list
A list containing all created PySCIPOpt cons
"""
# Get the number of samples and output dimension
n_samples = _input.shape[0]
outdim = output.shape[-1]
# Create the individual tree estimators
estimators = create_tree_estimators(
scip_model,
_input,
tree_vars,
trees,
n_estimators,
outdim,
unique_naming_prefix,
epsilon,
False,
**kwargs,
)
# Aggregate the trees over the output dimension
aggregate_tree_output = aggregate_estimator_outputs(tree_vars, lr, constant, aggr=aggr)
# Formulate the appropriate constraints
created_vars, created_cons = create_aggregation_constraints(
scip_model,
aggregate_tree_output,
output,
n_samples,
outdim,
unique_naming_prefix,
classification,
)
return estimators, created_vars, created_cons
def create_aggregation_constraints(
scip_model,
aggregate_tree_output,
output,
n_samples,
outdim,
unique_naming_prefix,
classification,
):
"""
Creates the variables and constraints that link the output of the predictor itself and the aggregation of each
estimator.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
aggregate_tree_output : np.ndarray
The aggregated output variables of each decision tree
output : np.ndarray
The output variables of the predictor
n_samples : int
The number of samples
outdim : int
The number of outputs of each decision tree (i.e. estimator)
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
classification : bool
Whether the aggregated output of each decision tree (i.e. estimator) should be used for classification.
Returns
-------
created_vars : list
A list containing all created PySCIPOpt vars
created_cons : list
A list containing all created PySCIPOpt cons
"""
# Formulate the appropriate constraints
created_cons = []
created_vars = []
if not classification:
sum_tree_cons = np.zeros((n_samples, outdim), dtype=object)
for i in range(n_samples):
for j in range(outdim):
name = unique_naming_prefix + f"tree_sum_{i}_{j}"
sum_tree_cons[i][j] = scip_model.addCons(
output[i][j] == aggregate_tree_output[i][j], name=name
)
created_cons.append(sum_tree_cons)
else: | new_vars, new_cons = argmax_bound_formulation( | 3 | 2023-12-10 20:28:22+00:00 | 12k |
camenduru/MotionDirector-hf | utils/lora_handler.py | [
{
"identifier": "UNet3DConditionModel",
"path": "models/unet_3d_condition.py",
"snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = 
output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n \n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)"
},
{
"identifier": "convert_unet_state_dict",
"path": "utils/convert_diffusers_to_original_ms_text_to_video.py",
"snippet": "def convert_unet_state_dict(unet_state_dict, strict_mapping=False):\n print ('Converting the UNET')\n # buyer beware: this is a *brittle* function,\n # and correct output requires that all of these pieces interact in\n # the exact order in which I have arranged them.\n mapping = {k: k for k in unet_state_dict.keys()}\n\n for sd_name, hf_name in unet_conversion_map:\n if strict_mapping:\n if hf_name in mapping:\n mapping[hf_name] = sd_name\n else:\n mapping[hf_name] = sd_name\n for k, v in mapping.items():\n if \"resnets\" in k:\n for sd_part, hf_part in unet_conversion_map_resnet:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n # elif \"temp_convs\" in k:\n # for sd_part, hf_part in unet_conversion_map_resnet:\n # v = v.replace(hf_part, sd_part)\n # mapping[k] = v\n for k, v in mapping.items():\n for sd_part, hf_part in unet_conversion_map_layer:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n \n\n # there must be a pattern, but I don't want to bother atm\n do_not_unsqueeze = [f'output_blocks.{i}.1.proj_out.weight' for i in range(3, 12)] + [f'output_blocks.{i}.1.proj_in.weight' for i in range(3, 12)] + ['middle_block.1.proj_in.weight', 'middle_block.1.proj_out.weight'] + [f'input_blocks.{i}.1.proj_out.weight' for i in [1, 2, 4, 5, 7, 8]] + [f'input_blocks.{i}.1.proj_in.weight' for i in [1, 2, 4, 5, 7, 8]]\n print (do_not_unsqueeze)\n\n new_state_dict = {v: (unet_state_dict[k].unsqueeze(-1) if ('proj_' in k and ('bias' not in k) and (k not in do_not_unsqueeze)) else unet_state_dict[k]) for k, v in mapping.items()}\n # HACK: idk why the hell it does not work with list comprehension\n for k, v in new_state_dict.items():\n has_k = False\n for n in do_not_unsqueeze:\n if k == n:\n has_k = True\n\n if has_k:\n v = v.squeeze(-1)\n new_state_dict[k] = v\n\n return new_state_dict"
},
{
"identifier": "convert_text_enc_state_dict_v20",
"path": "utils/convert_diffusers_to_original_ms_text_to_video.py",
"snippet": "def convert_text_enc_state_dict_v20(text_enc_dict):\n #print ('Converting the text encoder')\n new_state_dict = {}\n capture_qkv_weight = {}\n capture_qkv_bias = {}\n for k, v in text_enc_dict.items():\n if (\n k.endswith(\".self_attn.q_proj.weight\")\n or k.endswith(\".self_attn.k_proj.weight\")\n or k.endswith(\".self_attn.v_proj.weight\")\n ):\n k_pre = k[: -len(\".q_proj.weight\")]\n k_code = k[-len(\"q_proj.weight\")]\n if k_pre not in capture_qkv_weight:\n capture_qkv_weight[k_pre] = [None, None, None]\n capture_qkv_weight[k_pre][code2idx[k_code]] = v\n continue\n\n if (\n k.endswith(\".self_attn.q_proj.bias\")\n or k.endswith(\".self_attn.k_proj.bias\")\n or k.endswith(\".self_attn.v_proj.bias\")\n ):\n k_pre = k[: -len(\".q_proj.bias\")]\n k_code = k[-len(\"q_proj.bias\")]\n if k_pre not in capture_qkv_bias:\n capture_qkv_bias[k_pre] = [None, None, None]\n capture_qkv_bias[k_pre][code2idx[k_code]] = v\n continue\n\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)\n new_state_dict[relabelled_key] = v\n\n for k_pre, tensors in capture_qkv_weight.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_weight\"] = torch.cat(tensors)\n\n for k_pre, tensors in capture_qkv_bias.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_bias\"] = torch.cat(tensors)\n\n return new_state_dict"
},
{
"identifier": "extract_lora_ups_down",
"path": "utils/lora.py",
"snippet": "def extract_lora_ups_down(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append((_child_module.lora_up, _child_module.lora_down))\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras"
},
{
"identifier": "inject_trainable_lora_extended",
"path": "utils/lora.py",
"snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n dropout_p: float = 0.0,\n scale: float = 1.0,\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras)\n if True:\n for target_replace_module_i in target_replace_module:\n for _module, name, _child_module in _find_modules(\n model, [target_replace_module_i], search_class=[nn.Linear, nn.Conv2d, nn.Conv3d]\n ):\n # if name == 'to_q':\n # continue\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n dropout_p=dropout_p,\n scale=scale,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n dropout_p=dropout_p,\n scale=scale,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv3d(\n _child_module.in_channels,\n _child_module.out_channels,\n bias=_child_module.bias is not None,\n kernel_size=_child_module.kernel_size,\n padding=_child_module.padding,\n r=r,\n dropout_p=dropout_p,\n scale=scale,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n\n _module._modules[name] = _tmp\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = loras.pop(0)\n _module._modules[name].lora_down.weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight.requires_grad = True\n _module._modules[name].lora_down.weight.requires_grad = True\n names.append(name)\n else:\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d, nn.Conv3d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n dropout_p=dropout_p,\n scale=scale,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n dropout_p=dropout_p,\n scale=scale,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif 
_child_module.__class__ == nn.Conv3d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv3d(\n _child_module.in_channels,\n _child_module.out_channels,\n bias=_child_module.bias is not None,\n kernel_size=_child_module.kernel_size,\n padding=_child_module.padding,\n r=r,\n dropout_p=dropout_p,\n scale=scale,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n\n _module._modules[name] = _tmp\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = loras.pop(0)\n _module._modules[name].lora_down.weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight.requires_grad = True\n _module._modules[name].lora_down.weight.requires_grad = True\n names.append(name)\n\n return require_grad_params, names"
},
{
"identifier": "save_lora_weight",
"path": "utils/lora.py",
"snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n flag=None\n): \n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float32))\n weights.append(_down.weight.to(\"cpu\").to(torch.float32))\n if not flag:\n torch.save(weights, path)\n else:\n weights_new=[]\n for i in range(0, len(weights), 4):\n subset = weights[i+(flag-1)*2:i+(flag-1)*2+2]\n weights_new.extend(subset)\n torch.save(weights_new, path)"
},
{
"identifier": "train_patch_pipe",
"path": "utils/lora.py",
"snippet": "def train_patch_pipe(pipe, patch_unet, patch_text):\n if patch_unet:\n print(\"LoRA : Patching Unet\")\n collapse_lora(pipe.unet)\n monkeypatch_remove_lora(pipe.unet)\n\n if patch_text:\n print(\"LoRA : Patching text encoder\")\n\n collapse_lora(pipe.text_encoder)\n monkeypatch_remove_lora(pipe.text_encoder)"
},
{
"identifier": "monkeypatch_or_replace_lora",
"path": "utils/lora.py",
"snippet": "def monkeypatch_or_replace_lora(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, LoraInjectedLinear]\n ):\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)"
},
{
"identifier": "monkeypatch_or_replace_lora_extended",
"path": "utils/lora.py",
"snippet": "def monkeypatch_or_replace_lora_extended(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model,\n target_replace_module,\n search_class=[\n nn.Linear, \n nn.Conv2d, \n nn.Conv3d,\n LoraInjectedLinear, \n LoraInjectedConv2d, \n LoraInjectedConv3d,\n ],\n ):\n\n if (_child_module.__class__ == nn.Linear) or (\n _child_module.__class__ == LoraInjectedLinear\n ):\n if len(loras[0].shape) != 2:\n continue\n\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n elif (_child_module.__class__ == nn.Conv2d) or (\n _child_module.__class__ == LoraInjectedConv2d\n ):\n if len(loras[0].shape) != 4:\n continue\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv2d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv2d(\n _source.in_channels,\n _source.out_channels,\n _source.kernel_size,\n _source.stride,\n _source.padding,\n _source.dilation,\n _source.groups,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d or(\n _child_module.__class__ == LoraInjectedConv3d\n ):\n\n if len(loras[0].shape) != 5:\n continue\n\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv3d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv3d(\n _source.in_channels,\n _source.out_channels,\n bias=_source.bias is not None,\n kernel_size=_source.kernel_size,\n padding=_source.padding,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)"
}
] | import os
import torch
from logging import warnings
from typing import Union
from types import SimpleNamespace
from models.unet_3d_condition import UNet3DConditionModel
from transformers import CLIPTextModel
from utils.convert_diffusers_to_original_ms_text_to_video import convert_unet_state_dict, convert_text_enc_state_dict_v20
from .lora import (
extract_lora_ups_down,
inject_trainable_lora_extended,
save_lora_weight,
train_patch_pipe,
monkeypatch_or_replace_lora,
monkeypatch_or_replace_lora_extended
) | 10,785 | self,
version: LORA_VERSIONS = LoraVersions.cloneofsimo,
use_unet_lora: bool = False,
use_text_lora: bool = False,
save_for_webui: bool = False,
only_for_webui: bool = False,
lora_bias: str = 'none',
unet_replace_modules: list = None,
text_encoder_replace_modules: list = None
):
self.version = version
self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)
self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)
self.lora_bias = lora_bias
self.use_unet_lora = use_unet_lora
self.use_text_lora = use_text_lora
self.save_for_webui = save_for_webui
self.only_for_webui = only_for_webui
self.unet_replace_modules = unet_replace_modules
self.text_encoder_replace_modules = text_encoder_replace_modules
self.use_lora = any([use_text_lora, use_unet_lora])
def is_cloneofsimo_lora(self):
return self.version == LoraVersions.cloneofsimo
def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):
if self.is_cloneofsimo_lora():
if func_type == LoraFuncTypes.loader:
return monkeypatch_or_replace_lora_extended
if func_type == LoraFuncTypes.injector:
return inject_trainable_lora_extended
assert "LoRA Version does not exist."
def check_lora_ext(self, lora_file: str):
return lora_file.endswith(tuple(LORA_FILE_TYPES))
def get_lora_file_path(
self,
lora_path: str,
model: Union[UNet3DConditionModel, CLIPTextModel]
):
if os.path.exists(lora_path):
lora_filenames = [fns for fns in os.listdir(lora_path)]
is_lora = self.check_lora_ext(lora_path)
is_unet = isinstance(model, UNet3DConditionModel)
is_text = isinstance(model, CLIPTextModel)
idx = 0 if is_unet else 1
base_name = FILE_BASENAMES[idx]
for lora_filename in lora_filenames:
is_lora = self.check_lora_ext(lora_filename)
if not is_lora:
continue
if base_name in lora_filename:
return os.path.join(lora_path, lora_filename)
return None
def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):
self.lora_loader(**lora_loader_args)
print(f"Successfully loaded LoRA from: {file_name}")
def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):
try:
lora_file = self.get_lora_file_path(lora_path, model)
if lora_file is not None:
lora_loader_args.update({"lora_path": lora_file})
self.handle_lora_load(lora_file, lora_loader_args)
else:
print(f"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...")
except Exception as e:
print(f"An error occured while loading a LoRA file: {e}")
def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):
return_dict = lora_args.copy()
if self.is_cloneofsimo_lora():
return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)
return_dict.update({
"model": model,
"loras": self.get_lora_file_path(lora_path, model),
"target_replace_module": replace_modules,
"r": r,
"scale": scale,
"dropout_p": dropout,
})
return return_dict
def do_lora_injection(
self,
model,
replace_modules,
bias='none',
dropout=0,
r=4,
lora_loader_args=None,
):
REPLACE_MODULES = replace_modules
params = None
negation = None
is_injection_hybrid = False
if self.is_cloneofsimo_lora():
is_injection_hybrid = True
injector_args = lora_loader_args
params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended
|
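# Constants describing the supported LoRA checkpoints (file basenames and extensions)
# and the argument subsets accepted by each LoRA backend.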
FILE_BASENAMES = ['unet', 'text_encoder']
LORA_FILE_TYPES = ['.pt', '.safetensors']
CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r']
STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias']
lora_versions = dict(
stable_lora = "stable_lora",
cloneofsimo = "cloneofsimo"
)
lora_func_types = dict(
loader = "loader",
injector = "injector"
)
lora_args = dict(
model = None,
loras = None,
target_replace_module = [],
target_module = [],
r = 4,
search_class = [torch.nn.Linear],
dropout = 0,
lora_bias = 'none'
)
LoraVersions = SimpleNamespace(**lora_versions)
LoraFuncTypes = SimpleNamespace(**lora_func_types)
LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo]
LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector]
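# Keep only the entries of _dict whose keys appear in `keys`; every requested key must be a known LoRA argument.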
def filter_dict(_dict, keys=[]):
if len(keys) == 0:
assert "Keys cannot empty for filtering return dict."
for k in keys:
if k not in lora_args.keys():
assert f"{k} does not exist in available LoRA arguments"
return {k: v for k, v in _dict.items() if k in keys}
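# Wraps version-specific LoRA loading and injection for the UNet and the text encoder.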
class LoraHandler(object):
def __init__(
self,
version: LORA_VERSIONS = LoraVersions.cloneofsimo,
use_unet_lora: bool = False,
use_text_lora: bool = False,
save_for_webui: bool = False,
only_for_webui: bool = False,
lora_bias: str = 'none',
unet_replace_modules: list = None,
text_encoder_replace_modules: list = None
):
self.version = version
self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)
self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)
self.lora_bias = lora_bias
self.use_unet_lora = use_unet_lora
self.use_text_lora = use_text_lora
self.save_for_webui = save_for_webui
self.only_for_webui = only_for_webui
self.unet_replace_modules = unet_replace_modules
self.text_encoder_replace_modules = text_encoder_replace_modules
self.use_lora = any([use_text_lora, use_unet_lora])
def is_cloneofsimo_lora(self):
return self.version == LoraVersions.cloneofsimo
def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):
if self.is_cloneofsimo_lora():
if func_type == LoraFuncTypes.loader:
return monkeypatch_or_replace_lora_extended
if func_type == LoraFuncTypes.injector:
return inject_trainable_lora_extended
assert "LoRA Version does not exist."
def check_lora_ext(self, lora_file: str):
return lora_file.endswith(tuple(LORA_FILE_TYPES))
def get_lora_file_path(
self,
lora_path: str,
model: Union[UNet3DConditionModel, CLIPTextModel]
):
if os.path.exists(lora_path):
lora_filenames = [fns for fns in os.listdir(lora_path)]
is_lora = self.check_lora_ext(lora_path)
is_unet = isinstance(model, UNet3DConditionModel)
is_text = isinstance(model, CLIPTextModel)
idx = 0 if is_unet else 1
base_name = FILE_BASENAMES[idx]
for lora_filename in lora_filenames:
is_lora = self.check_lora_ext(lora_filename)
if not is_lora:
continue
if base_name in lora_filename:
return os.path.join(lora_path, lora_filename)
return None
def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):
self.lora_loader(**lora_loader_args)
print(f"Successfully loaded LoRA from: {file_name}")
def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):
try:
lora_file = self.get_lora_file_path(lora_path, model)
if lora_file is not None:
lora_loader_args.update({"lora_path": lora_file})
self.handle_lora_load(lora_file, lora_loader_args)
else:
print(f"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...")
except Exception as e:
print(f"An error occured while loading a LoRA file: {e}")
def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):
return_dict = lora_args.copy()
if self.is_cloneofsimo_lora():
return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)
return_dict.update({
"model": model,
"loras": self.get_lora_file_path(lora_path, model),
"target_replace_module": replace_modules,
"r": r,
"scale": scale,
"dropout_p": dropout,
})
return return_dict
def do_lora_injection(
self,
model,
replace_modules,
bias='none',
dropout=0,
r=4,
lora_loader_args=None,
):
REPLACE_MODULES = replace_modules
params = None
negation = None
is_injection_hybrid = False
if self.is_cloneofsimo_lora():
is_injection_hybrid = True
injector_args = lora_loader_args
params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended | for _up, _down in extract_lora_ups_down( | 3 | 2023-12-11 04:51:39+00:00 | 12k |
Theia-4869/MoSA | src/models/build_vit_backbone.py | [
{
"identifier": "VisionTransformer",
"path": "src/models/vit_backbones/vit.py",
"snippet": "class VisionTransformer(nn.Module):\n def __init__(\n self, model_type,\n img_size=224, num_classes=21843, vis=False\n ):\n super(VisionTransformer, self).__init__()\n config = CONFIGS[model_type]\n self.num_classes = num_classes\n self.classifier = config.classifier\n\n self.transformer = Transformer(config, img_size, vis)\n self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()\n self.vis = vis\n\n def forward(self, x):\n x, attn_weights = self.transformer(x)\n logits = self.head(x[:, 0])\n\n if not self.vis:\n return logits\n return logits, attn_weights # attn_weights: num_layers, B, num_head, num_patches, num_patches\n \n def forward_cls_layerwise(self, x):\n cls_embeds = self.transformer.forward_cls_layerwise(x)\n return cls_embeds\n\n def load_from(self, weights):\n with torch.no_grad():\n self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights[\"embedding/kernel\"], conv=True))\n self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights[\"embedding/bias\"]))\n self.transformer.embeddings.cls_token.copy_(np2th(weights[\"cls\"]))\n self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights[\"Transformer/encoder_norm/scale\"]))\n self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights[\"Transformer/encoder_norm/bias\"]))\n\n posemb = np2th(weights[\"Transformer/posembed_input/pos_embedding\"])\n posemb_new = self.transformer.embeddings.position_embeddings\n if posemb.size() == posemb_new.size():\n self.transformer.embeddings.position_embeddings.copy_(posemb)\n else:\n logger.info(\"load_pretrained: resized variant: %s to %s\" % (posemb.size(), posemb_new.size()))\n ntok_new = posemb_new.size(1)\n\n if self.classifier == \"token\":\n posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]\n ntok_new -= 1\n else:\n posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n\n gs_old = int(np.sqrt(len(posemb_grid)))\n gs_new = int(np.sqrt(ntok_new))\n print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))\n posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)\n\n zoom = (gs_new / gs_old, gs_new / gs_old, 1)\n posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)\n posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)\n posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)\n self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))\n\n for bname, block in self.transformer.encoder.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=uname)\n\n if self.transformer.embeddings.hybrid:\n self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights[\"conv_root/kernel\"], conv=True))\n gn_weight = np2th(weights[\"gn_root/scale\"]).view(-1)\n gn_bias = np2th(weights[\"gn_root/bias\"]).view(-1)\n self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)\n self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)\n\n for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=bname, n_unit=uname)"
},
{
"identifier": "SwinTransformer",
"path": "src/models/vit_backbones/swin_transformer.py",
"snippet": "class SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = 
nn.AdaptiveAvgPool1d(1)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n # logger.info(x.shape)\n\n x = self.norm(x) # B L C\n x = self.avgpool(x.transpose(1, 2)) # B C 1\n x = torch.flatten(x, 1)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops"
},
{
"identifier": "build_model",
"path": "src/models/vit_backbones/vit_mae.py",
"snippet": "def build_model(model_type):\n if \"vitb\" in model_type:\n return vit_base_patch16()\n elif \"vitl\" in model_type:\n return vit_large_patch16()\n elif \"vith\" in model_type:\n return vit_huge_patch14()"
},
{
"identifier": "vit_base",
"path": "src/models/vit_backbones/vit_moco.py",
"snippet": "def vit_base(**kwargs):\n model = VisionTransformerMoCo(\n patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n return model"
},
{
"identifier": "AdaptedVisionTransformer",
"path": "src/models/vit_adapter/vit_adapter.py",
"snippet": "class AdaptedVisionTransformer(nn.Module):\n def __init__(\n self, model_type,\n img_size=224, num_classes=21843, vis=False, adapter_cfg=None\n ):\n super(AdaptedVisionTransformer, self).__init__()\n config = CONFIGS[model_type]\n self.num_classes = num_classes\n self.classifier = config.classifier\n\n self.transformer = AdaptedTransformer(config, img_size, vis, adapter_cfg)\n self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward(self, x, vis=False):\n x, attn_weights = self.transformer(x)\n logits = self.head(x[:, 0])\n\n if not vis:\n return logits\n return logits, attn_weights\n \n\n def load_from(self, weights):\n with torch.no_grad():\n self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights[\"embedding/kernel\"], conv=True))\n self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights[\"embedding/bias\"]))\n self.transformer.embeddings.cls_token.copy_(np2th(weights[\"cls\"]))\n self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights[\"Transformer/encoder_norm/scale\"]))\n self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights[\"Transformer/encoder_norm/bias\"]))\n\n posemb = np2th(weights[\"Transformer/posembed_input/pos_embedding\"])\n posemb_new = self.transformer.embeddings.position_embeddings\n if posemb.size() == posemb_new.size():\n self.transformer.embeddings.position_embeddings.copy_(posemb)\n else:\n logger.info(\"load_pretrained: resized variant: %s to %s\" % (posemb.size(), posemb_new.size()))\n ntok_new = posemb_new.size(1)\n\n if self.classifier == \"token\":\n posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]\n ntok_new -= 1\n else:\n posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n\n gs_old = int(np.sqrt(len(posemb_grid)))\n gs_new = int(np.sqrt(ntok_new))\n print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))\n posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)\n\n zoom = (gs_new / gs_old, gs_new / gs_old, 1)\n posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)\n posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)\n posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)\n self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))\n\n for bname, block in self.transformer.encoder.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=uname)\n\n if self.transformer.embeddings.hybrid:\n self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights[\"conv_root/kernel\"], conv=True))\n gn_weight = np2th(weights[\"gn_root/scale\"]).view(-1)\n gn_bias = np2th(weights[\"gn_root/bias\"]).view(-1)\n self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)\n self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)\n\n for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=bname, n_unit=uname)"
},
{
"identifier": "AdaptedSwinTransformer",
"path": "src/models/vit_adapter/swin_adapter.py",
"snippet": "class AdaptedSwinTransformer(SwinTransformer):\n def __init__(\n self, adapter_config, img_size=224, patch_size=4, in_chans=3, \n num_classes=1000, embed_dim=96, depths=[2, 2, 6, 2], \n num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4., qkv_bias=True, \n qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs\n ):\n super(AdaptedSwinTransformer, self).__init__(\n img_size, patch_size, in_chans, num_classes, embed_dim, depths,\n num_heads, window_size, mlp_ratio, qkv_bias, qk_scale, drop_rate,\n attn_drop_rate, drop_path_rate, norm_layer, ape, patch_norm,\n use_checkpoint, **kwargs\n )\n self.adapter_config = adapter_config\n \n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(\n self.patches_resolution[0] // (2 ** i_layer),\n self.patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n block_module=AdaptedSwinTransformerBlock,\n adapter_config=adapter_config\n )\n self.layers.append(layer)"
},
{
"identifier": "MoSAVisionTransformer",
"path": "src/models/vit_adapter/vit_mosa.py",
"snippet": "class MoSAVisionTransformer(nn.Module):\n def __init__(\n self, model_type,\n img_size=224, num_classes=21843, mid=False, vis=False, adapter_cfg=None\n ):\n super(MoSAVisionTransformer, self).__init__()\n config = CONFIGS[model_type]\n self.num_classes = num_classes\n self.classifier = config.classifier\n\n self.transformer = MoSATransformer(config, img_size, mid, vis, adapter_cfg)\n self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward(self, x, mid=False, vis=False):\n x, hidden_states, attn_weights = self.transformer(x)\n logits = self.head(x[:, 0])\n\n if not mid and not vis:\n return logits\n elif mid and not vis:\n return logits, hidden_states\n elif vis and not mid:\n return logits, attn_weights\n else:\n return logits, hidden_states, attn_weights\n\n\n def load_from(self, weights):\n with torch.no_grad():\n self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights[\"embedding/kernel\"], conv=True))\n self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights[\"embedding/bias\"]))\n self.transformer.embeddings.cls_token.copy_(np2th(weights[\"cls\"]))\n self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights[\"Transformer/encoder_norm/scale\"]))\n self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights[\"Transformer/encoder_norm/bias\"]))\n\n posemb = np2th(weights[\"Transformer/posembed_input/pos_embedding\"])\n posemb_new = self.transformer.embeddings.position_embeddings\n if posemb.size() == posemb_new.size():\n self.transformer.embeddings.position_embeddings.copy_(posemb)\n else:\n logger.info(\"load_pretrained: resized variant: %s to %s\" % (posemb.size(), posemb_new.size()))\n ntok_new = posemb_new.size(1)\n\n if self.classifier == \"token\":\n posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]\n ntok_new -= 1\n else:\n posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n\n gs_old = int(np.sqrt(len(posemb_grid)))\n gs_new = int(np.sqrt(ntok_new))\n print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))\n posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)\n\n zoom = (gs_new / gs_old, gs_new / gs_old, 1)\n posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)\n posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)\n posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)\n self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))\n\n for bname, block in self.transformer.encoder.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=uname)\n\n if self.transformer.embeddings.hybrid:\n self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights[\"conv_root/kernel\"], conv=True))\n gn_weight = np2th(weights[\"gn_root/scale\"]).view(-1)\n gn_bias = np2th(weights[\"gn_root/bias\"]).view(-1)\n self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)\n self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)\n\n for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=bname, n_unit=uname)\n \n def merge(self, mode='add'):\n for blk in self.transformer.encoder.layer:\n blk.merge(mode)"
},
{
"identifier": "MoSASwinTransformer",
"path": "src/models/vit_adapter/swin_mosa.py",
"snippet": "class MoSASwinTransformer(SwinTransformer):\n def __init__(\n self, adapter_config, mid=False, img_size=224, patch_size=4, in_chans=3, \n num_classes=1000, embed_dim=96, depths=[2, 2, 6, 2], \n num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4., qkv_bias=True, \n qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs\n ):\n super(MoSASwinTransformer, self).__init__(\n img_size, patch_size, in_chans, num_classes, embed_dim, depths,\n num_heads, window_size, mlp_ratio, qkv_bias, qk_scale, drop_rate,\n attn_drop_rate, drop_path_rate, norm_layer, ape, patch_norm,\n use_checkpoint, **kwargs\n )\n self.adapter_config = adapter_config\n \n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(\n self.patches_resolution[0] // (2 ** i_layer),\n self.patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n block_module=MoSASwinTransformerBlock,\n adapter_config=adapter_config\n )\n self.layers.append(layer)\n \n def merge(self, mode='add'):\n for layer in self.layers:\n for blk in layer.blocks:\n blk.mlp.merge(mode)"
},
{
"identifier": "build_model",
"path": "src/models/vit_adapter/vit_mae.py",
"snippet": "def build_model(model_type, adapter_cfg):\n if \"vitb\" in model_type:\n return vit_base_patch16(adapter_cfg)\n elif \"vitl\" in model_type:\n return vit_large_patch16(adapter_cfg)\n elif \"vith\" in model_type:\n return vit_huge_patch14(adapter_cfg)"
},
{
"identifier": "vit_base",
"path": "src/models/vit_adapter/vit_moco.py",
"snippet": "def vit_base(adapter_cfg, **kwargs):\n model = ADPT_VisionTransformerMoCo(\n adapter_cfg,\n patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n return model"
},
{
"identifier": "LoRAVisionTransformer",
"path": "src/models/vit_adapter/vit_lora.py",
"snippet": "class LoRAVisionTransformer(nn.Module):\n def __init__(\n self, model_type,\n img_size=224, num_classes=21843, vis=False, lora_cfg=None\n ):\n super(LoRAVisionTransformer, self).__init__()\n config = CONFIGS[model_type]\n self.num_classes = num_classes\n self.classifier = config.classifier\n\n self.transformer = LoRATransformer(config, img_size, vis, lora_cfg)\n self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward(self, x, vis=False):\n x, attn_weights = self.transformer(x)\n logits = self.head(x[:, 0])\n\n if not vis:\n return logits\n return logits, attn_weights\n\n def load_from(self, weights):\n with torch.no_grad():\n self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights[\"embedding/kernel\"], conv=True))\n self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights[\"embedding/bias\"]))\n self.transformer.embeddings.cls_token.copy_(np2th(weights[\"cls\"]))\n self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights[\"Transformer/encoder_norm/scale\"]))\n self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights[\"Transformer/encoder_norm/bias\"]))\n\n posemb = np2th(weights[\"Transformer/posembed_input/pos_embedding\"])\n posemb_new = self.transformer.embeddings.position_embeddings\n if posemb.size() == posemb_new.size():\n self.transformer.embeddings.position_embeddings.copy_(posemb)\n else:\n logger.info(\"load_pretrained: resized variant: %s to %s\" % (posemb.size(), posemb_new.size()))\n ntok_new = posemb_new.size(1)\n\n if self.classifier == \"token\":\n posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]\n ntok_new -= 1\n else:\n posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n\n gs_old = int(np.sqrt(len(posemb_grid)))\n gs_new = int(np.sqrt(ntok_new))\n print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))\n posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)\n\n zoom = (gs_new / gs_old, gs_new / gs_old, 1)\n posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)\n posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)\n posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)\n self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))\n\n for bname, block in self.transformer.encoder.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=uname)\n\n if self.transformer.embeddings.hybrid:\n self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights[\"conv_root/kernel\"], conv=True))\n gn_weight = np2th(weights[\"gn_root/scale\"]).view(-1)\n gn_bias = np2th(weights[\"gn_root/bias\"]).view(-1)\n self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)\n self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)\n\n for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=bname, n_unit=uname)"
},
{
"identifier": "MoSLVisionTransformer",
"path": "src/models/vit_adapter/vit_mosl.py",
"snippet": "class MoSLVisionTransformer(nn.Module):\n def __init__(\n self, model_type,\n img_size=224, num_classes=21843, mid=False, vis=False, lora_cfg=None\n ):\n super(MoSLVisionTransformer, self).__init__()\n config = CONFIGS[model_type]\n self.num_classes = num_classes\n self.classifier = config.classifier\n\n self.transformer = MoSLTransformer(config, img_size, mid, vis, lora_cfg)\n self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward(self, x, mid=False, vis=False):\n x, hidden_states, attn_weights = self.transformer(x)\n logits = self.head(x[:, 0])\n\n if not mid and not vis:\n return logits\n elif mid and not vis:\n return logits, hidden_states\n elif vis and not mid:\n return logits, attn_weights\n else:\n return logits, hidden_states, attn_weights\n\n def load_from(self, weights):\n with torch.no_grad():\n self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights[\"embedding/kernel\"], conv=True))\n self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights[\"embedding/bias\"]))\n self.transformer.embeddings.cls_token.copy_(np2th(weights[\"cls\"]))\n self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights[\"Transformer/encoder_norm/scale\"]))\n self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights[\"Transformer/encoder_norm/bias\"]))\n\n posemb = np2th(weights[\"Transformer/posembed_input/pos_embedding\"])\n posemb_new = self.transformer.embeddings.position_embeddings\n if posemb.size() == posemb_new.size():\n self.transformer.embeddings.position_embeddings.copy_(posemb)\n else:\n logger.info(\"load_pretrained: resized variant: %s to %s\" % (posemb.size(), posemb_new.size()))\n ntok_new = posemb_new.size(1)\n\n if self.classifier == \"token\":\n posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]\n ntok_new -= 1\n else:\n posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n\n gs_old = int(np.sqrt(len(posemb_grid)))\n gs_new = int(np.sqrt(ntok_new))\n print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))\n posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)\n\n zoom = (gs_new / gs_old, gs_new / gs_old, 1)\n posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)\n posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)\n posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)\n self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))\n\n for bname, block in self.transformer.encoder.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=uname)\n\n if self.transformer.embeddings.hybrid:\n self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights[\"conv_root/kernel\"], conv=True))\n gn_weight = np2th(weights[\"gn_root/scale\"]).view(-1)\n gn_bias = np2th(weights[\"gn_root/bias\"]).view(-1)\n self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)\n self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)\n\n for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=bname, n_unit=uname)\n \n def merge(self, mode='add'):\n for blk in self.transformer.encoder.layer:\n blk.attn.merge(mode)"
}
] | import numpy as np
import torch
import os
from .vit_backbones.vit import VisionTransformer
from .vit_backbones.swin_transformer import SwinTransformer
from .vit_backbones.vit_mae import build_model as mae_vit_model
from .vit_backbones.vit_moco import vit_base
from .vit_adapter.vit_adapter import AdaptedVisionTransformer
from .vit_adapter.swin_adapter import AdaptedSwinTransformer
from .vit_adapter.vit_mosa import MoSAVisionTransformer
from .vit_adapter.swin_mosa import MoSASwinTransformer
from .vit_adapter.vit_mae import build_model as adapter_mae_vit_model
from .vit_adapter.vit_moco import vit_base as adapter_vit_base
from .vit_adapter.vit_lora import LoRAVisionTransformer
from .vit_adapter.vit_mosl import MoSLVisionTransformer | 10,678 | embed_dim = 96
num_layers = 4
elif model_type == "swinb_imagenet_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet_384":
model = SwinTransformer(
img_size=384,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=12,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet22k_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet22k_384":
model = SwinTransformer(
img_size=384,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=12,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinl_imagenet22k_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 192
num_layers = 4
feat_dim = int(embed_dim * 2 ** (num_layers - 1))
# load checkpoint
model_w = os.path.join(model_root, MODEL_ZOO[model_type])
checkpoint = torch.load(model_w, map_location='cpu')
state_dict = checkpoint['model']
if crop_size == 448:
for k in list(state_dict.keys()):
if "attn_mask" not in k:
# remove prefix
state_dict[k] = state_dict[k]
# delete renamed or unused k
else:
del state_dict[k]
# rename some keys for ssl models
if model_type.endswith("ssl"):
# rename moco pre-trained keys
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('encoder.'):
# remove prefix
state_dict[k[len("encoder."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
model.load_state_dict(state_dict, strict=False)
return model, feat_dim
def build_vit_sup_models(
model_type, crop_size, model_root=None, adapter_cfg=None, lora_cfg=None, load_pretrain=True, vis=False
):
# image size is the size of actual image
m2featdim = {
"sup_vitb16_224": 768,
"sup_vitb16": 768,
"sup_vitl16_224": 1024,
"sup_vitl16": 1024,
"sup_vitb8_imagenet21k": 768,
"sup_vitb16_imagenet21k": 768,
"sup_vitb32_imagenet21k": 768,
"sup_vitl16_imagenet21k": 1024,
"sup_vitl32_imagenet21k": 1024,
"sup_vith14_imagenet21k": 1280,
}
if adapter_cfg is not None:
if adapter_cfg.MOE:
model = MoSAVisionTransformer(model_type, crop_size, num_classes=-1, adapter_cfg=adapter_cfg, mid=adapter_cfg.DEEPREG)
else:
model = AdaptedVisionTransformer(model_type, crop_size, num_classes=-1, adapter_cfg=adapter_cfg)
elif lora_cfg is not None:
if lora_cfg.MOE:
model = MoSLVisionTransformer(model_type, crop_size, num_classes=-1, lora_cfg=lora_cfg, mid=lora_cfg.DEEPREG)
else:
| #!/usr/bin/env python3
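# Mapping from model_type identifiers to pretrained checkpoint filenames (resolved relative to model_root).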
MODEL_ZOO = {
"swint_imagenet": "swin_tiny_patch4_window7_224.pth",
"swint_imagenet_ssl": "moby_swin_t_300ep_pretrained.pth",
"swins_imagenet": "swin_small_patch4_window7_224.pth",
"swinb_imagenet_224": "swin_base_patch4_window7_224.pth",
"swinb_imagenet_384": "swin_base_patch4_window12_384.pth",
"swinb_imagenet22k_224": "swin_base_patch4_window7_224_22k.pth",
"swinb_imagenet22k_384": "swin_base_patch4_window12_384_22k.pth",
"swinl_imagenet22k_224": "swin_large_patch4_window7_224_22k.pth",
"sup_vitb8": "ViT-B_8.npz",
"sup_vitb16_224": "ViT-B_16-224.npz",
"sup_vitb16": "ViT-B_16.npz",
"sup_vitl16_224": "ViT-L_16-224.npz",
"sup_vitl16": "ViT-L_16.npz",
"sup_vitb8_imagenet21k": "imagenet21k_ViT-B_8.npz",
"sup_vitb32_imagenet21k": "imagenet21k_ViT-B_32.npz",
"sup_vitb16_imagenet21k": "imagenet21k_ViT-B_16.npz",
"sup_vitl16_imagenet21k": "imagenet21k_ViT-L_16.npz",
"sup_vitl32_imagenet21k": "imagenet21k_ViT-L_32.npz",
"sup_vith14_imagenet21k": "imagenet21k_ViT-H_14.npz",
"mae_vith14": "mae_pretrain_vit_huge.pth",
"mae_vitb16": "mae_pretrain_vit_base.pth",
"mae_vitl16": "mae_pretrain_vit_large.pth",
}
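# MAE-pretrained ViT backbone: build the (optionally adapted) model, load its checkpoint and drop the classification head.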
def build_mae_model(
model_type, crop_size, prompt_cfg, model_root, adapter_cfg=None
):
if adapter_cfg is not None:
model = adapter_mae_vit_model(model_type, adapter_cfg)
else:
model = mae_vit_model(model_type)
out_dim = model.embed_dim
ckpt = os.path.join(model_root, MODEL_ZOO[model_type])
checkpoint = torch.load(ckpt, map_location="cpu")
state_dict = checkpoint['model']
model.load_state_dict(state_dict, strict=False)
model.head = torch.nn.Identity()
return model, out_dim
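# MoCo-v3 ViT-B backbone: strip the 'module.' prefix from checkpoint keys and drop the classification head.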
def build_mocov3_model(
model_type, crop_size, prompt_cfg, model_root, adapter_cfg=None
):
if model_type != "mocov3_vitb":
raise ValueError("Does not support other arch")
if adapter_cfg is not None:
model = adapter_vit_base(adapter_cfg)
else:
model = vit_base()
out_dim = 768
ckpt = os.path.join(model_root,"mocov3_linear-vit-b-300ep.pth.tar")
checkpoint = torch.load(ckpt, map_location="cpu")
state_dict = checkpoint['state_dict']
for k in list(state_dict.keys()):
# retain only base_encoder up to before the embedding layer
if k.startswith('module.'):
# remove prefix
state_dict[k[len("module."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
model.load_state_dict(state_dict, strict=False)
model.head = torch.nn.Identity()
return model, out_dim
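# Entry point for Swin backbones: dispatch to the adapter-aware or plain builder.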
def build_swin_model(model_type, crop_size, model_root, adapter_cfg, lora_cfg, load_pretrain, vis):
if adapter_cfg is not None:
return _build_adapted_swin_model(model_type, crop_size, adapter_cfg, model_root)
else:
return _build_swin_model(model_type, crop_size, model_root)
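# Swin-B (ImageNet-22k) backbone with MoSA or plain adapters injected into every block; checkpoint loaded non-strictly.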
def _build_adapted_swin_model(model_type, crop_size, adapter_cfg, model_root):
if model_type == "swinb_imagenet22k_224":
if adapter_cfg.MOE:
model = MoSASwinTransformer(
adapter_config=adapter_cfg,
mid=adapter_cfg.DEEPREG,
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
else:
model = AdaptedSwinTransformer(
adapter_config=adapter_cfg,
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
feat_dim = int(embed_dim * 2 ** (num_layers - 1))
# load checkpoint
model_w = os.path.join(model_root, MODEL_ZOO[model_type])
checkpoint = torch.load(model_w, map_location='cpu')
state_dict = checkpoint['model']
if crop_size == 448:
for k in list(state_dict.keys()):
if "attn_mask" not in k:
# remove prefix
state_dict[k] = state_dict[k]
# delete renamed or unused k
else:
del state_dict[k]
# rename some keys for ssl models
if model_type.endswith("ssl"):
# rename moco pre-trained keys
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('encoder.'):
# remove prefix
state_dict[k[len("encoder."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
model.load_state_dict(state_dict, strict=False)
return model, feat_dim
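# Plain Swin backbones; num_classes=-1 turns the classification head into an identity so features are returned.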
def _build_swin_model(model_type, crop_size, model_root):
if model_type == "swint_imagenet":
model = SwinTransformer(
img_size=crop_size,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
drop_path_rate=0.2,
num_classes=-1, # setting to a negative value will make head as identity
)
embed_dim = 96
num_layers = 4
elif model_type == "swint_imagenet_ssl":
model = SwinTransformer(
img_size=crop_size,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
drop_path_rate=0.2,
num_classes=-1,
)
embed_dim = 96
num_layers = 4
elif model_type == "swins_imagenet":
model = SwinTransformer(
img_size=crop_size,
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
drop_path_rate=0.3,
num_classes=-1,
)
embed_dim = 96
num_layers = 4
elif model_type == "swinb_imagenet_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet_384":
model = SwinTransformer(
img_size=384,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=12,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet22k_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet22k_384":
model = SwinTransformer(
img_size=384,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=12,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinl_imagenet22k_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 192
num_layers = 4
feat_dim = int(embed_dim * 2 ** (num_layers - 1))
# load checkpoint
model_w = os.path.join(model_root, MODEL_ZOO[model_type])
checkpoint = torch.load(model_w, map_location='cpu')
state_dict = checkpoint['model']
if crop_size == 448:
for k in list(state_dict.keys()):
if "attn_mask" not in k:
# remove prefix
state_dict[k] = state_dict[k]
# delete renamed or unused k
else:
del state_dict[k]
# rename some keys for ssl models
if model_type.endswith("ssl"):
# rename moco pre-trained keys
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('encoder.'):
# remove prefix
state_dict[k[len("encoder."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
model.load_state_dict(state_dict, strict=False)
return model, feat_dim
def build_vit_sup_models(
model_type, crop_size, model_root=None, adapter_cfg=None, lora_cfg=None, load_pretrain=True, vis=False
):
# image size is the size of actual image
m2featdim = {
"sup_vitb16_224": 768,
"sup_vitb16": 768,
"sup_vitl16_224": 1024,
"sup_vitl16": 1024,
"sup_vitb8_imagenet21k": 768,
"sup_vitb16_imagenet21k": 768,
"sup_vitb32_imagenet21k": 768,
"sup_vitl16_imagenet21k": 1024,
"sup_vitl32_imagenet21k": 1024,
"sup_vith14_imagenet21k": 1280,
}
if adapter_cfg is not None:
if adapter_cfg.MOE:
model = MoSAVisionTransformer(model_type, crop_size, num_classes=-1, adapter_cfg=adapter_cfg, mid=adapter_cfg.DEEPREG)
else:
model = AdaptedVisionTransformer(model_type, crop_size, num_classes=-1, adapter_cfg=adapter_cfg)
elif lora_cfg is not None:
if lora_cfg.MOE:
model = MoSLVisionTransformer(model_type, crop_size, num_classes=-1, lora_cfg=lora_cfg, mid=lora_cfg.DEEPREG)
else: | model = LoRAVisionTransformer(model_type, crop_size, num_classes=-1, lora_cfg=lora_cfg) | 10 | 2023-12-06 07:50:16+00:00 | 12k |
khwong-c/syn-magia | tests/bundle/test_bundle.py | [
{
"identifier": "Input",
"path": "magia/core.py",
"snippet": "class Input(Signal):\n \"\"\"\n Representing an input signal.\n It has no driver, but it is driving other signals.\n It is used by both the module declaration and the module instance.\n \"\"\"\n\n def __init__(\n self,\n name: str, width: int, signed: bool = False,\n owner_instance: Optional[\"Instance\"] = None,\n **kwargs\n ):\n \"\"\"\n I/O ports must have name and width well-defined by designers.\n \"\"\"\n if name is None:\n raise ValueError(\"Input name is not set\")\n if width == 0:\n raise ValueError(\"Input width is not set\")\n\n super().__init__(name=name, width=width, signed=signed, **kwargs)\n self._config.signal_type = SignalType.INPUT\n self._config.owner_instance = owner_instance\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Net Name of I/O must be the same with the name, even they are within an IOBundle\n \"\"\"\n return self.name\n\n def elaborate(self) -> str:\n \"\"\"\n Elaborate the input signal in the module declaration.\n :return: input logic (signed) [...]PORT_NAME\n \"\"\"\n port_decl = self.signal_decl().rstrip(\";\")\n return f\"input {port_decl}\"\n\n def copy(self, owner_instance: Optional[\"Instance\"] = None) -> \"Input\":\n \"\"\"\n Copy the input signal. Driver is discarded.\n I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.\n :return: A new input signal with the same configuration.\n \"\"\"\n return Input(\n name=self.name,\n width=len(self),\n signed=self.signed,\n description=self.description,\n owner_instance=owner_instance,\n )"
},
{
"identifier": "Output",
"path": "magia/core.py",
"snippet": "class Output(Signal):\n \"\"\"\n Representing an output signal.\n They are the starting points when we elaborate the module.\n It is used by both the module declaration and the module instance.\n \"\"\"\n\n def __init__(\n self,\n name: str, width: int, signed: bool = False,\n owner_instance: Optional[\"Instance\"] = None,\n **kwargs\n ):\n \"\"\"\n I/O ports must have name and width well-defined by designers.\n \"\"\"\n if name is None:\n raise ValueError(\"Output name is not set\")\n if width == 0:\n raise ValueError(\"Output width is not set\")\n super().__init__(name=name, width=width, signed=signed, **kwargs)\n self._config.signal_type = SignalType.OUTPUT\n self._config.owner_instance = owner_instance\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Net Name of I/O must be the same with the name, even they are within an IOBundle\n \"\"\"\n return self.name\n\n def elaborate(self) -> str:\n \"\"\"\n Elaborate the output signal in the module declaration.\n :return: output logic (signed) [...]PORT_NAME\n \"\"\"\n port_decl = self.signal_decl().rstrip(\";\")\n return f\"output {port_decl}\"\n\n def copy(self, owner_instance: Optional[\"Instance\"] = None, **kwargs) -> \"Output\":\n \"\"\"\n Copy the output signal. Driver is discarded.\n I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.\n :return: A new output signal with the same configuration.\n \"\"\"\n return Output(\n name=self.name,\n width=len(self),\n signed=self.signed,\n description=self.description,\n owner_instance=owner_instance,\n )"
},
{
"identifier": "Signal",
"path": "magia/core.py",
"snippet": "class Signal(Synthesizable):\n \"\"\"\n The general signal class. It has drivers, which is another signal.\n It can also drive other signals / module instances.\n \"\"\"\n SINGLE_DRIVER_NAME: str = \"d\"\n _SIGNAL_DECL_TEMPLATE = Template(\"logic $signed $width $name;\")\n _SIGNAL_CONNECT_TEMPLATE = Template(\"always_comb\\n $name = $driver;\")\n _SIGNAL_ASSIGN_TEMPLATE = Template(\"assign $name = $driver;\")\n\n _new_signal_counter = count(0)\n\n def __init__(\n self,\n width: int = 0, signed: bool = False,\n name: Optional[str] = None,\n parent_bundle: Optional[\"SignalBundle\"] = None,\n description: Optional[str] = None,\n **kwargs\n ):\n if name is None:\n name = f\"net_{next(self._new_signal_counter)}\"\n\n super().__init__(**kwargs)\n self._config = SignalConfig(\n name=name,\n width=width,\n signed=signed,\n parent_bundle=parent_bundle,\n description=\"\" if description is None else description,\n )\n self._drivers = SignalDict()\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Full name of a signal, used for elaboration.\n \"\"\"\n if self._config.parent_bundle is not None:\n if self._config.parent_bundle.name is not None:\n return f\"bundle_{self._config.parent_bundle.name}_{self.name}\"\n return f\"bundle_{id(self._config.parent_bundle)}_{self.name}\"\n return self.name\n\n @property\n def name(self) -> str:\n \"\"\"\n Short name of the signal, is used to identify the signal in a bundle / SignalDict\n \"\"\"\n return self._config.name\n\n @property\n def description(self) -> str:\n \"\"\"\n Description of the signal\n \"\"\"\n return self._config.description\n\n @property\n def type(self) -> SignalType:\n return self._config.signal_type\n\n @property\n def signed(self) -> bool:\n return self._config.signed\n\n def driver(self, driver_name: str = SINGLE_DRIVER_NAME) -> Optional[\"Signal\"]:\n \"\"\"\n Get the driver of the signal.\n :param driver_name: The name of the driver. 
Default to the single driver.\n :return: The driver signal.\n \"\"\"\n return self._drivers.get(driver_name)\n\n @property\n def drivers(self) -> list[\"Signal\"]:\n \"\"\"\n Get the drivers of the signal.\n :return: The driver signals.\n \"\"\"\n return list(self._drivers.values())\n\n @property\n def owner_instance(self) -> Optional[\"Instance\"]:\n \"\"\"\n Get the module instance that owns this signal.\n It is applicable to input / output signals only.\n \"\"\"\n return self._config.owner_instance\n\n def set_width(self, width: int):\n self._config.width = width\n return self\n\n def set_signed(self, signed: bool):\n self._config.signed = signed\n return self\n\n def set_name(self, name: str):\n self._config.name = name\n return self\n\n def with_signed(self, signed: bool) -> \"Signal\":\n \"\"\"\n Create a new signal with the same configuration, but with a different signedness.\n Connect the original signal to the new signal.\n\n New Signal is not added to the parent bundle.\n\n :return: A new signal with the same configuration.\n \"\"\"\n signal = Signal(\n width=len(self),\n signed=signed,\n parent_bundle=None,\n )\n signal <<= self\n return signal\n\n def with_width(self, width: int) -> \"Signal\":\n \"\"\"\n Create a new signal with the same configuration, but with a different width.\n Connect the original signal to the new signal.\n\n New Signal is not added to the parent bundle.\n\n :return: A new signal with the new configuration.\n \"\"\"\n if width == len(self):\n signal = Signal(\n width=width,\n signed=self.signed,\n parent_bundle=None,\n )\n signal <<= self\n return signal\n if width < len(self):\n return self[width - 1:]\n\n # Perform sign extension / padding according to the signedness of the signal\n padding_size = (width - len(self))\n if self.signed:\n return self[(-1,) * padding_size, :]\n return Constant(0, padding_size) @ self\n\n def signal_decl(self) -> str:\n \"\"\"\n Declare the signal in the module implementation.\n :return: logic (signed) [...]SIGNAL_NAME\n \"\"\"\n if self.net_name is None:\n raise ValueError(\"Signal name is not set\")\n if len(self) == 0:\n raise ValueError(\"Signal width is not set and cannot be inferred\")\n\n return self._SIGNAL_DECL_TEMPLATE.substitute(\n signed=\"signed\" if self.signed else \"\",\n width=f\"[{width - 1}:0]\" if (width := len(self)) > 1 else \"\",\n name=self.net_name,\n )\n\n def elaborate(self) -> str:\n signal_decl = self.signal_decl()\n\n # Ignore assignment signal if it is driven by an output of a module instance\n if self.driver().type != SignalType.OUTPUT:\n assignment = self._SIGNAL_ASSIGN_TEMPLATE.substitute(\n name=self.net_name,\n driver=self.driver().net_name,\n )\n return \"\\n\".join((signal_decl, assignment))\n return signal_decl\n\n def copy(self, parent_bundle: Optional[\"SignalBundle\"] = None, **kwargs) -> \"Signal\":\n \"\"\"\n Copy the signal. 
Driver is discarded.\n Signal can only be copied to a SignalBundle, not an IOBundle.\n :return: A new signal with the same configuration.\n \"\"\"\n return Signal(\n name=self.name,\n width=len(self),\n signed=self.signed,\n parent_bundle=parent_bundle,\n )\n\n def __ilshift__(self, other):\n \"\"\"\n Connect the signal with the driver.\n :param other: Driving Signal\n :return: Original Signal\n \"\"\"\n if isinstance(other, (int, bytes)):\n other = Constant(other, len(self), self.signed)\n if not isinstance(other, Signal):\n raise TypeError(f\"Cannot assign {type(other)} to drive {type(self)}\")\n if self._drivers.get(self.SINGLE_DRIVER_NAME) is not None:\n raise ValueError(f\"Multiple driver on Signal {self.name}.\")\n if self.type == SignalType.OUTPUT and self.owner_instance is not None:\n raise ValueError(\"Cannot drive output of a module instance.\")\n if other.type == SignalType.INPUT and other.owner_instance is not None:\n raise ValueError(\"Input of a module instance cannot drive other signal.\")\n if self.type == SignalType.INPUT and self.owner_instance is None:\n raise ValueError(\"Cannot drive the Input of a module type.\")\n if other.type == SignalType.OUTPUT and other.owner_instance is None:\n raise ValueError(\"Output of a module type cannot drive other signal.\")\n if self.type == SignalType.CONSTANT:\n raise ValueError(\"Constant signal cannot be driven.\")\n\n self._drivers[self.SINGLE_DRIVER_NAME] = other\n if len(self) == 0:\n self.set_width(len(other))\n elif len(other) == 0:\n other.set_width(len(self))\n return self\n\n def __add__(self, other) -> \"Signal\":\n return Operation.create(OPType.ADD, self, other)\n\n def __iadd__(self, other) -> \"Signal\":\n return self.__add__(other)\n\n def __sub__(self, other) -> \"Signal\":\n return Operation.create(OPType.MINUS, self, other)\n\n def __isub__(self, other) -> \"Signal\":\n return self.__sub__(other)\n\n def __neg__(self) -> \"Signal\":\n return Operation.create(\n OPType.MINUS,\n Constant(0, len(self), self.signed),\n self\n )\n\n def __mul__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.MUL, self, other)\n\n def __imul__(self, other) -> \"Signal\":\n return self.__mul__(other)\n\n def __eq__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.EQ, self, other)\n\n def __ne__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.NEQ, self, other)\n\n def __ge__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.GE, self, other)\n\n def __gt__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.GT, self, other)\n\n def __le__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.LE, self, other)\n\n def __lt__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.LT, self, other)\n\n def __and__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.AND, self, other)\n\n def __iand__(self, other) -> \"Signal\":\n return 
self.__and__(other)\n\n def __or__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.OR, self, other)\n\n def __ior__(self, other) -> \"Signal\":\n return self.__or__(other)\n\n def __xor__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.XOR, self, other)\n\n def __ixor__(self, other) -> \"Signal\":\n return self.__xor__(other)\n\n def __invert__(self) -> \"Signal\":\n return Operation.create(OPType.NOT, self, None)\n\n def __cmp__(self, other) -> \"Signal\":\n raise NotImplementedError(\"Comparison Operator is not implemented.\")\n\n def __lshift__(self, other) -> \"Signal\":\n if isinstance(other, int):\n op = Operation.create(OPType.LSHIFT, self, other)\n op._op_config.shifting = other\n return op\n raise NotImplementedError(\"Only Constant Shift is not implemented.\")\n\n def __rshift__(self, other) -> \"Signal\":\n if isinstance(other, int):\n op = Operation.create(OPType.RSHIFT, self, other)\n op._op_config.shifting = other\n return op\n raise NotImplementedError(\"Only Constant Shift is not implemented.\")\n\n def __irshift__(self, other) -> \"Signal\":\n raise NotImplementedError(\"`>>=` Operator is not defined.\")\n\n def __getitem__(self, item) -> \"Signal\":\n \"\"\" The Slicing Operator \"\"\"\n # Return the concatenation of the sliced signals\n # If multiple slices are provided.\n if isinstance(item, Iterable):\n sliced = [self[i] for i in item]\n concat = None\n for s in sliced:\n if concat is None:\n concat = s\n else:\n concat @= s\n return concat\n\n if isinstance(item, int):\n item = slice(item, item, None)\n if item is Ellipsis:\n item = slice(None, None, None)\n\n if not isinstance(item, slice):\n raise TypeError(f\"Cannot perform operation on {type(item)}\")\n if item.step is not None:\n raise ValueError(\"Slice step is not implement.\")\n\n return Operation.create(OPType.SLICE, self, item)\n\n def __matmul__(self, other) -> \"Signal\":\n \"\"\"\n Special operation for the `@` operator, which is the concatenation operator.\n \"\"\"\n if isinstance(other, Signal):\n return Operation.create(OPType.CONCAT, self, other)\n raise TypeError(f\"Cannot perform operation on {type(other)}\")\n\n def __imatmul__(self, other) -> \"Signal\":\n return self.__matmul__(other)\n\n def __len__(self):\n return self.width\n\n @property\n def width(self):\n return self._config.width\n\n def reg(\n self,\n clk: Optional[\"Input\"] = None,\n enable: Optional[\"Signal\"] = None,\n reset: Optional[\"Signal\"] = None,\n async_reset: Optional[\"Signal\"] = None,\n reset_value: Optional[Union[bytes, int]] = None,\n async_reset_value: Optional[Union[bytes, int]] = None,\n name: Optional[str] = None,\n ) -> \"Register\":\n \"\"\"\n Create a register from the signal.\n \"\"\"\n register = Register(\n width=len(self),\n enable=enable,\n reset=reset,\n async_reset=async_reset,\n reset_value=reset_value,\n async_reset_value=async_reset_value,\n clk=clk,\n signed=self.signed,\n name=name,\n )\n register <<= self\n return register\n\n def when(\n self,\n condition: \"Signal\",\n else_: Optional[\"Signal\"] = None,\n ) -> \"When\":\n \"\"\"\n Create a `Self if Condition else Else_` statement, similar to the ternary operator in C / Python.\n E.g. 
`gated = data.when(enable)`, `default_2 = data.when(enable, 2)`\n \"\"\"\n if else_ is None:\n else_ = 0\n return When(\n condition=condition,\n if_true=self,\n if_false=else_,\n )\n\n def case(self, cases: dict[int, Union[\"Signal\", int]], default: Optional[Union[\"Signal\", int]] = None, ) -> \"Case\":\n \"\"\"\n Create a `case` statement.\n \"\"\"\n return Case(\n selector=self,\n cases=cases,\n default=default,\n )\n\n def any(self) -> \"Signal\":\n \"\"\"\n Create an `any` statement.\n \"\"\"\n return Operation.create(OPType.ANY, self, None)\n\n def all(self) -> \"Signal\":\n \"\"\"\n Create an `all` statement.\n \"\"\"\n return Operation.create(OPType.ALL, self, None)\n\n def parity(self) -> \"Signal\":\n \"\"\"\n Create an `parity` statement.\n \"\"\"\n return Operation.create(OPType.PARITY, self, None)"
},
{
"identifier": "Module",
"path": "magia/module.py",
"snippet": "class Module(Synthesizable):\n \"\"\"\n A module is a collection of signals and operations. It can also include other modules.\n The module is the base class of specialized modules.\n Developers can define the generic behavior of the module in a dynamic way,\n while each `Module` objects is a specialized module initialized with specific parameters.\n\n The SystemVerilog Keyword `parameters` is not used here.\n It is because we can generate the code for the specialized module with parametrized values hard-coded.\n\n The module can be instantiated with the `instance` method.\n\n Designers shall implement the circuit logic in the `__init__` method.\n However, we highly recommend designers to extract the logic implementation into a seperated method.\n e.g.\n def __init__(self, **kwargs):\n self.io += Input(\"a\", 8)\n self.io += Output(\"q\", 8)\n self.implement()\n\n def implement(self):\n self.io.q <<= self.io.a + 1\n \"\"\"\n _MOD_DECL_TEMPLATE = Template(\"module $name (\\n$io\\n);\")\n _new_module_counter = count(0)\n output_file: Optional[PathLike] = None\n\n def __init__(self, name: Optional[str] = None, **kwargs):\n super().__init__(**kwargs)\n\n # Get the arguments passed to the __init__ method of the inherited class\n # === DON'T REFACTOR BELOW. We are inspecting the stack and refactoring will affect the result ===\n children_local = inspect.stack(0)[1].frame.f_locals\n children_class = children_local.get(\"__class__\")\n func_signature = inspect.signature(children_class.__init__) if children_class else {}\n self._mod_params = OrderedDict(**{\n arg: children_local[arg]\n for arg, param in func_signature.parameters.items()\n if param.kind not in (param.VAR_KEYWORD, param.VAR_POSITIONAL) and arg != \"self\"\n })\n # === DON'T REFACTOR ABOVE ===\n\n if name is None:\n name = f\"{self.__class__.__name__}_{next(self._new_module_counter)}\"\n\n self._config = ModuleConfig(\n module_class=type(self),\n name=name,\n )\n self.io = IOBundle()\n\n def validate(self) -> list[Exception]:\n undriven_outputs = [\n output.net_name\n for output in self.io.outputs\n if output.driver() is None\n ]\n if undriven_outputs:\n return [\n ValueError(\"Output not driven\", output)\n for output in undriven_outputs\n ]\n return []\n\n def mod_declaration(self) -> str:\n mod_decl = self._MOD_DECL_TEMPLATE.substitute(\n name=self.name,\n io=\",\\n\".join(\n port.elaborate()\n for port in self.io.inputs + self.io.outputs\n ),\n )\n return \"\\n\".join((mod_decl, self._module_elab_doc))\n\n def elaborate(self) -> tuple[str, set[\"Module\"]]:\n \"\"\"\n Trace nets and operations from output ports\n This method generates the SystemVerilog code for the module.\n\n :return: The SystemVerilog code for the module, and the list of submodules of the instance in the module.\n \"\"\"\n violations = self.validate()\n if violations:\n raise ValueError(f\"Module {self.name} is not valid.\", violations)\n\n mod_decl = self.mod_declaration()\n\n signals, insts = self.trace()\n\n mod_impl = [\n inst.elaborate()\n for inst in insts\n ]\n mod_impl += [\n signal.elaborate()\n for signal in signals\n ]\n\n mod_impl = \"\\n\".join(mod_impl)\n\n mod_output_assignment = \"\\n\".join(\n Signal._SIGNAL_ASSIGN_TEMPLATE.substitute(\n name=output.net_name,\n driver=output.driver().net_name,\n )\n for output in self.io.outputs\n )\n\n extra_code = self.post_elaborate()\n\n mod_end = \"endmodule\"\n\n sv_code = \"\\n\".join((mod_decl, mod_impl, mod_output_assignment, extra_code, mod_end))\n submodules = {inst.module for inst in 
insts}\n\n return sv_code, submodules\n\n def post_elaborate(self) -> str:\n \"\"\"\n Override this method to add extra code to the module.\n The code will be added after the elaboration of the module.\n\n Adding assertions to the module is a typical use case.\n\n :return: The extra code to be added to the module.\n \"\"\"\n _ = self # Stub to avoid IDE/Lint warning\n return \"\"\n\n def trace(self) -> tuple[list[Union[Signal, Memory]], list[\"Instance\"]]:\n \"\"\"\n Trace nets and instances from output ports\n \"\"\"\n traced_sig_id: set[int] = set()\n traced_inst_id: set[int] = set()\n traced_signal: list[Union[Signal, Memory]] = []\n traced_inst: list[Instance] = []\n sig_to_be_traced: dict[int, Signal] = {}\n\n for output in self.io.outputs:\n sig_to_be_traced |= {\n id(sig): sig\n for sig in output.drivers\n }\n while sig_to_be_traced:\n next_trace = {}\n for signal_id, signal in sig_to_be_traced.items():\n\n # Tracing Instances with Output connected\n if signal.type == SignalType.OUTPUT:\n inst: Optional[Instance] = signal.owner_instance\n if inst is not None and id(inst) not in traced_inst_id:\n traced_inst_id.add(id(inst))\n traced_inst.append(inst)\n\n # The Input port of the instance is skipped\n # We will go directly to the driver as it must be driven by another signal.\n input_drivers = [i.driver() for i in inst.inputs.values()]\n next_trace |= {\n id_sig: sig\n for sig in input_drivers\n if (id_sig := id(sig)) not in traced_sig_id\n }\n elif signal.type != SignalType.INPUT and signal_id not in traced_sig_id:\n traced_sig_id.add(signal_id)\n traced_signal.append(signal)\n\n next_trace |= {\n id_sig: sig\n for sig in signal.drivers\n if sig.type not in (SignalType.INPUT,)\n and (id_sig := id(sig)) not in traced_sig_id\n }\n\n if signal.type == SignalType.MEMORY:\n signal: MemorySignal\n if id(signal.memory) not in traced_sig_id:\n traced_sig_id.add(id(signal.memory))\n traced_signal.append(signal.memory)\n\n next_trace |= {\n id_sig: sig\n for sig in signal.memory.drivers\n if (id_sig := id(sig)) not in traced_sig_id\n }\n\n sig_to_be_traced = next_trace\n\n traced_signal.reverse()\n traced_inst.reverse()\n\n # Check if we have name conflict on the signals and instances\n sig_name_counter = Counter(sig.net_name for sig in traced_signal)\n inst_name_counter = Counter(inst.name for inst in traced_inst)\n sig_conflicts = [name for name, cnt in sig_name_counter.items() if cnt > 1]\n inst_conflicts = [name for name, cnt in inst_name_counter.items() if cnt > 1]\n if sig_conflicts:\n raise ValueError(f\"Signal name conflict: {sig_conflicts}\")\n if inst_conflicts:\n raise ValueError(f\"Instance name conflict: {inst_conflicts}\")\n\n return traced_signal, traced_inst\n\n def instance(\n self, name: Optional[str] = None,\n io: Optional[dict[str, Signal]] = None\n ) -> \"Instance\":\n \"\"\"\n Create an instance of the module\n :return: The created instance\n \"\"\"\n return Instance(\n module=self,\n name=name,\n io=io,\n )\n\n @property\n def name(self) -> str:\n return self._config.name\n\n @property\n def params(self) -> dict[str, object]:\n \"\"\"\n Return the parameters used to specialize this module.\n \"\"\"\n return self._mod_params\n\n @property\n def _module_elab_doc(self) -> str:\n \"\"\"\n Generate the summary of a module and register it to the module.\n It will be written into the SystemVerilog code during elaboration.\n \"\"\"\n doc = self._module_doc_str\n\n if self.params:\n doc += \"\\nModule Parameters:\\n\"\n doc += \"-----------------\\n\"\n doc += \"\\n\".join(\n 
f\"{k}: {v}\"\n for k, v in self.params.items()\n ) + \"\\n\"\n\n if doc:\n doc = f\"/*\\n{doc}*/\\n\"\n return doc\n\n @property\n def _module_doc_str(self) -> str:\n doc = inspect.getdoc(self.__class__)\n if doc is None or doc == inspect.getdoc(Module):\n return \"\"\n if not doc.endswith(\"\\n\"):\n return doc + \"\\n\"\n return doc\n\n @cached_property\n def _module_init_param_doc(self) -> dict[str, str]:\n params = [(k, f\"{k}:\") for k in self._mod_params]\n doc = inspect.getdoc(self.__init__)\n if doc is None:\n return []\n\n result_doc = {}\n possible_param = [line.strip() for line in doc.split(\"\\n\") if \":\" in line]\n for line in possible_param:\n for param, sep in params:\n if sep in line:\n result_doc[param] = line.split(sep, 1)[-1].strip()\n return result_doc\n\n @property\n def spec(self) -> dict[str, object]:\n \"\"\"\n Return the \"Specification\" of a specialized Module.\n It is a dictionary which can be further processed.\n \"\"\"\n return {\n \"name\": self.name,\n \"description\": self._module_doc_str.strip(),\n \"parameters\": [\n {\n \"name\": k,\n \"value\": v,\n \"description\": self._module_init_param_doc.get(k, \"\"),\n }\n for k, v in self.params.items()\n ],\n \"ports\": [\n {\n \"name\": alias,\n \"direction\": signal.type.name,\n \"width\": len(signal),\n \"signed\": signal.signed,\n \"description\": signal.description,\n }\n for alias, signal in self.io.signals.items()\n ],\n }"
}
] | import random
import cocotb.clock
import tests.helper as helper
from pathlib import Path
from cocotb_test.simulator import run as sim_run
from magia import Input, Module, Output, Signal | 7,323 |
@cocotb.test()
async def inst_connect_test(dut):
for i in range(10):
dut.d.value = random.randint(0, 0xF0)
await cocotb.clock.Timer(1, units="ns")
actual_value = dut.q.value
expected_value = dut.d.value + 2
assert expected_value == actual_value, f"Expected {expected_value}, got {actual_value} on Entry {i}."
class TestBundle:
TOP = "TopModule"
def test_bundle_connect_inst(self, temp_build_dir):
|
@cocotb.test()
async def inst_connect_test(dut):
for i in range(10):
dut.d.value = random.randint(0, 0xF0)
await cocotb.clock.Timer(1, units="ns")
actual_value = dut.q.value
expected_value = dut.d.value + 2
assert expected_value == actual_value, f"Expected {expected_value}, got {actual_value} on Entry {i}."
class TestBundle:
TOP = "TopModule"
def test_bundle_connect_inst(self, temp_build_dir): | class SubModule(Module): | 3 | 2023-12-12 22:50:43+00:00 | 12k |
nickruggeri/hypergraph-message-passing | main_message_passing.py | [
{
"identifier": "load_data",
"path": "src/data/data_io.py",
"snippet": "def load_data(\n real_dataset: str = \"\",\n hye_file: str = \"\",\n pickle_file: str = \"\",\n N: int | None = None,\n) -> IncidenceHypergraph:\n \"\"\"Load a hypergraph dataset.\n Utility function for loading hypergraph data provided in various formats.\n Currently, three formats are supported:\n - real_dataset: a string with the name of a real dataset\n - hye_file: a text file containing the hyperedges\n - pickle_file: the path to a pickle serialized hypergraph.\n\n The function raises an error if more than one of the options above is given as\n configurations.\n\n Parameters\n ----------\n real_dataset: name of one the supported real datasets\n hye_file: .txt file containing the hyperedges in the dataset.\n pickle_file: path to a .pkl file to be loaded via the pickle package.\n N: number of nodes. Only utilized when hye_file is provided.\n\n Returns\n -------\n The loaded hypergraph.\n \"\"\"\n # Check that the data is provided exactly in one of the possible configurations\n # formats.\n inputs = bool(real_dataset) + bool(hye_file) + bool(pickle_file)\n if inputs == 0:\n raise ValueError(\"No configurations hypergraph has been provided.\")\n if inputs >= 2:\n raise ValueError(\"More than one configurations hypergraph has been provided.\")\n\n if real_dataset:\n if real_dataset in PREPROCESSED_DATASETS:\n return load_real_hypergraph(real_dataset)\n raise ValueError(\"Unknown name for real_dataset:\", real_dataset)\n\n if pickle_file:\n with open(pickle_file, \"rb\") as file:\n return pkl.load(file)\n\n if hye_file:\n if hye_file.endswith(\".txt\"):\n return IncidenceHypergraph.load_from_txt(hye_file, N)\n elif hye_file.endswith(\".pkl\"):\n with open(hye_file, \"rb\") as file:\n hye_list = pkl.load(file)\n return IncidenceHypergraph.load_from_hye_list(hye_list, N)"
},
{
"identifier": "dynamic_updates",
"path": "src/model/dynamic_updates.py",
"snippet": "N_JOBS: int = -1\n K = model.K\n K = p.shape[0]\n K = p.shape[0]\n K = p.shape[0]\ndef compute_psi_dynamic_programming(\n hypergraph: IncidenceHypergraph,\n model: \"HypergraphBlockModel\",\n mask: np.ndarray | None = None,\n) -> list[sparse.coo_array]:\n def hyperedge_psi_(hye: int):\ndef compute_psi_tilde_dynamic_programming(\n hypergraph: IncidenceHypergraph,\n model: \"HypergraphBlockModel\",\n) -> np.ndarray:\n def hyperedge_psi_(hye):\ndef hyperedge_psi(\n incidence: sparse.csc_array,\n hye: int,\n p: np.ndarray,\n log_node_to_hye: list[sparse.csc_array],\n eta_tilde: bool = False,\n mask: np.ndarray | None = None,\n) -> tuple[np.ndarray, np.ndarray] | np.ndarray:\ndef _compute_eta(\n p: np.ndarray,\n log_node_to_hye_array: np.ndarray,\n eta_tilde: bool = False,\n) -> np.ndarray:\ndef _compute_psi_brute_force(\n hypergraph: IncidenceHypergraph,\n model: \"HypergraphBlockModel\",\n) -> list[sparse.coo_array]:\ndef _compute_eta_brute_force(\n model, p: np.ndarray, log_node_to_hye_array: np.ndarray\n) -> np.ndarray:"
},
{
"identifier": "HypergraphBlockModel",
"path": "src/model/hypergraph_block_model.py",
"snippet": "class HypergraphBlockModel:\n \"\"\"Hypergraph version of the Stochastic Block Model, introduced in\n\n \"Message Passing on Hypergraphs: Detectability, Phase Transitions, and Higher-Order\n Information\", Ruggeri et al.\n\n\n This probabilistic model for hypergraphs partitions the nodes into K hard\n communities, specified by an array of assignments t. The communities interact\n through a symmetric affinity matrix p, with shape (K, K). Together, the community\n assignments t and the affinity matrix p define the Bernoulli probability of the\n single hyperedges to be observed or not.\n \"\"\"\n\n def __init__(\n self,\n n: np.ndarray | None,\n p: np.ndarray | None,\n N: int,\n K: int,\n max_hye_size: int | None,\n ) -> None:\n r\"\"\"Stochastic Block Model for Hypergraphs.\n This version of SBM considers, for every node i, hard community assignments\n :math::`t_i`, i.e. categorical assignments to one out of K communities.\n Together with a (K, K) affinity matrix, these two parameters define the\n likelihood for unweighted hypergraphs (i.e. hyperedges have weights in {0, 1}).\n A prior :math::`n=(n_1, \\ldots, n_K)` for the community assignments can also be\n specified.\n\n Parameters\n ----------\n n: array of prior parameters for the communities.\n If specified, this array is used as initialization for EM inference,\n otherwise it is initialized at random.\n The array has length K equal to the number of communities, and specifies the\n categorical prior probabilities.\n p: symmetric matrix of community interaction probabilities.\n If specified, this matrix is used as initialization for EM inference,\n otherwise it is initialized at random.\n The matrix has shape (K, K), where K is the number of communities, and\n contains the inter and intra-community interaction probabilities,\n constrained to the [0, 1] interval.\n N: number of nodes.\n K: number of communities.\n max_hye_size: maximum size of the hyperedges D.\n Notice that this quantity is used to infer probabilistic quantities in the\n model, but is not checked against input hypergraphs.\n \"\"\"\n\n # Model related attributes\n self._check_params(n, p, K, N, max_hye_size)\n self.n = n.copy() if n is not None else None\n self.p = p.copy() if p is not None else None\n self.N = N\n self.K = K\n self.max_hye_size: int = max_hye_size if max_hye_size is not None else N\n\n # Quantities inferred after message passing.\n # log of the messages from hyperedges to nodes. Stored as lists of sparse\n # matrices. For every hyperedge e and node i, the matrix at position a in the\n # list contains the messages from e to i, for community assignment a.\n self.log_hye_to_node: list[TYPE_HYE_TO_NODE] | None = None\n # log of the messages from nodes to hyperedges.\n # They are encoded similarly to the messages above.\n self.log_node_to_hye: list[TYPE_NODE_TO_HYE] | None = None\n # Other quantities, log-marginals and external field\n self.log_marginals: np.ndarray | None = None\n self.external_field: np.ndarray | None = None\n\n # Training diagnostics.\n self.training_iter: int | None = None\n self.n_diff: list[float] = []\n self.c_diff: list[float] = []\n self.log_marginal_diff: list[list[float]] = []\n\n # Random number generator.\n self.rng: np.random.Generator = np.random.default_rng()\n\n @property\n def c(self):\n \"\"\"Return the rescaled affinity matrix c, defined as\n .. 
math::\n c = N p\n where N is the number of nodes and p the affinity matrix.\n \"\"\"\n return self.p * self.N\n\n def em_inference(\n self,\n hypergraph: IncidenceHypergraph,\n em_iter: int = 20,\n em_thresh: float = 1e-5,\n mp_iter: int = 2000,\n mp_thresh: float = 1e-5,\n mp_patience: int = 50,\n seed: int | None = None,\n dirichlet_alpha: float | None = None,\n dropout: float = 0.99,\n ) -> None:\n \"\"\"Perform Expectation Maximization (EM) inference on a hypergraph.\n The inference routine consist of alternating message passing, where the\n community assignments :math::`t_i` are inferred, and updates to the global\n parameters, i.e. the affinity matrix w and community priors n.\n If the affinity w or priors n are provided at initialization of the model, these\n are not inferred, but kept fixed.\n\n Parameters\n ----------\n hypergraph: hypergraph to perform inference on.\n em_iter: maximum number of EM iterations.\n One iteration consists of the message passing routine plus the global\n parameter updates.\n em_thresh: threshold for EM convergence.\n The threshold is computed over the absolute difference of the community\n priors and the affinity matrix between two consecutive EM iterations.\n mp_iter: maximum number of message passing iterations.\n mp_thresh: threshold for message passing convergence.\n The threshold is computed over the absolute difference of the log-marginals\n between two consecutive iterations.\n mp_patience: number of steps below the mp_thresh.\n After a number of consecutive iterations, specified by patience, with an\n absolute change in log-marginals below the mp_thresh, the message passing\n procedure is stopped.\n seed: random seed.\n dirichlet_alpha: parameter for the Dirichlet distribution.\n Utilized for the initialization of the messages, which are drawn from a\n uniform Dirichlet distribution with parameter alpha.\n If None, alpha is chosen automatically.\n dropout: dropout rate.\n The dropout rate it the number of randomly discarded updates in the messages\n and marginals. 
At every iteration of message passing, these discarded values\n are kept at the previous iteration value.\n \"\"\"\n if seed is not None:\n self.rng = np.random.default_rng(seed)\n self._check_hypergraph_vs_model_params(hypergraph)\n\n if self.n is None:\n fixed_n = False\n self._random_init_n()\n logging.info(f\"Initialized n prior:\\n{self.n}\")\n else:\n fixed_n = True\n\n if self.p is None:\n fixed_p = False\n self._random_init_p()\n logging.info(f\"Initialized rescaled affinity c=N*p:\\n{self.c}\")\n else:\n fixed_p = True\n\n for it in range(em_iter):\n logging.info(f\"EM iteration {it}\")\n\n # Local parameters: message passing.\n self.parallel_message_passing(\n hypergraph,\n mp_iter=mp_iter,\n mp_thresh=mp_thresh,\n patience=mp_patience,\n warm_start=True,\n seed=None, # keep the current random number generator unaltered.\n dirichlet_alpha=dirichlet_alpha,\n dropout=dropout,\n )\n\n # Global parameters: EM updates.\n if not fixed_n or not fixed_p:\n logging.info(\"\\tUpdates of priors n and affinity p...\")\n if not fixed_n:\n old_n = self.n.copy()\n self.n = self.updated_community_prior()\n self.n_diff.append(np.abs(old_n - self.n).sum())\n logging.info(\n f\"\\tCommunity prior:\\n{self.n}\"\n \"\\n\\tDifference from previous iteration: \"\n f\"{self.n_diff[-1]}\"\n )\n if not fixed_p:\n old_c = self.c.copy()\n self.p = self.updated_affinity_matrix(hypergraph)\n self.c_diff.append(np.abs(old_c - self.c).sum())\n logging.info(\n f\"\\tRescaled affinity matrix c=N*p:\\n{self.c}\"\n \"\\n\\tDifference from previous iteration:\"\n f\"{self.c_diff[-1]}\"\n )\n\n self.training_iter = it + 1\n\n if not fixed_n or not fixed_p:\n param_diff = 0.0\n if not fixed_n:\n param_diff += self.n_diff[-1]\n if not fixed_p:\n param_diff += self.c_diff[-1]\n if param_diff <= em_thresh:\n logging.info(\n \"Expectation-maximization threshold passed. \"\n \"inference terminated.\"\n )\n break\n\n def parallel_message_passing(\n self,\n hypergraph: IncidenceHypergraph,\n mp_iter: int = 2000,\n mp_thresh: float = 1.0e-5,\n dirichlet_alpha: float | None = None,\n dropout: float = 0.99,\n patience: int = 50,\n seed: int | None = None,\n warm_start: bool = True,\n ) -> None:\n \"\"\"Perform message passing inference of the node assignments.\n\n Parameters\n ----------\n hypergraph: a hypergraph.\n mp_iter: maximum number of message passing iterations.\n mp_thresh: threshold for message passing convergence.\n The threshold is computed over the absolute difference of the log-marginals\n between two consecutive iterations.\n dirichlet_alpha: parameter for the Dirichlet distribution.\n Utilized for the initialization of the messages, which are drawn from a\n uniform Dirichlet distribution with parameter alpha.\n If None, alpha is chosen automatically.\n dropout: dropout rate.\n The dropout rate it the number of randomly discarded updates in the messages\n and marginals. 
At every iteration of message passing, these discarded values\n are kept at the previous iteration value.\n patience: number of steps below the mp_thresh.\n After a number of consecutive iterations, specified by patience, with an\n absolute change in log-marginals below the mp_thresh, the message passing\n procedure is stopped.\n seed: random seed.\n warm_start: whether to re-initialize the messages and marginal beliefs.\n \"\"\"\n logging.info(\"\\tMessage passing...\")\n if seed is not None:\n self.rng = np.random.default_rng(seed)\n self._check_hypergraph_vs_model_params(hypergraph)\n\n all_messages_init = (\n self.log_hye_to_node is not None\n and self.log_node_to_hye is not None\n and self.log_marginals is not None\n and self.external_field is not None\n )\n\n if not warm_start or not all_messages_init:\n alpha = 10.0 * self.K if dirichlet_alpha is None else dirichlet_alpha\n self._init_message_passing(hypergraph, dirichlet_alpha=alpha)\n logging.debug(\n f\"\\t\\tInitialized hye to node:\\n{self.log_hye_to_node[0].data[:5]}\"\n )\n logging.debug(\n f\"\\t\\tInitialized node to hye:\\n{self.log_node_to_hye[0].data[:5]}\"\n )\n logging.debug(f\"\\t\\tInitialized marginals:\\n{self.log_marginals[:5]}\")\n logging.debug(f\"\\t\\tInitialized external field:\\n{self.external_field}\")\n\n self.log_marginal_diff.append(list())\n patience_count = 0\n for i in range(mp_iter):\n old_log_marginals = self.log_marginals.copy()\n self._parallel_message_passing_step(hypergraph, dropout)\n self.log_marginal_diff[-1].append(\n np.abs(old_log_marginals - self.log_marginals).sum()\n )\n logging.info(\n f\"\\t\\tMP step {i} - difference in log-marginals from previous iter: \"\n f\"{self.log_marginal_diff[-1][-1]}\"\n )\n\n if self.log_marginal_diff[-1][-1] <= mp_thresh:\n patience_count += 1\n else:\n patience_count = 0\n\n if patience_count == patience:\n logging.info(\n \"\\tMessage passing threshold passed. Message passing terminated.\"\n )\n break\n\n def _parallel_message_passing_step(\n self,\n hypergraph: IncidenceHypergraph,\n dropout: float = 0.99,\n ) -> None:\n \"\"\"Perform one step of message passing, updating the messages from nodes to\n factors, the messages from factors to nodes, the marginal probabilities and\n external field.\"\"\"\n inc = hypergraph.get_binary_incidence_matrix()\n\n # Update node to hye.\n new_node_to_hye = [None] * self.K\n for assignment in range(self.K):\n col_sum = self.log_hye_to_node[assignment].sum(axis=1)\n assert col_sum.shape == (self.N,)\n col_sum += np.log(self.n[assignment]) - self.external_field[assignment]\n col_sum = col_sum.reshape((self.N, 1))\n new_node_to_hye[assignment] = (\n TYPE_HYE_TO_NODE(inc * col_sum) - self.log_hye_to_node[assignment]\n )\n\n norm = sparse_reduce_lse(*new_node_to_hye)\n for assignment in range(self.K):\n new_node_to_hye[assignment].data -= norm.data\n new_node_to_hye[assignment].data = np.clip(\n new_node_to_hye[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX\n )\n\n # TODO dropout could be made more efficient here. 
Do it or not?\n if dropout > 0:\n non_dropout_mask = (\n self.rng.random(len(self.log_node_to_hye[0].data)) >= dropout\n )\n for assignment in range(self.K):\n self.log_node_to_hye[assignment].data[\n non_dropout_mask\n ] = new_node_to_hye[assignment].data[non_dropout_mask]\n else:\n for assignment in range(self.K):\n self.log_node_to_hye[assignment].data = new_node_to_hye[assignment].data\n\n logging.debug(f\"\\t\\tUpdated node to hye:\\n{self.log_node_to_hye[0].data[:5]}\")\n\n # Update hye to node.\n if dropout > 0:\n non_dropout_mask = (\n self.rng.random(len(self.log_hye_to_node[0].data)) >= dropout\n )\n else:\n non_dropout_mask = None\n new_hye_to_node = [\n TYPE_HYE_TO_NODE(x)\n for x in compute_psi_dynamic_programming(\n hypergraph=hypergraph,\n model=self,\n mask=non_dropout_mask,\n )\n ]\n\n norm = sparse_reduce_lse(*new_hye_to_node)\n for assignment in range(self.K):\n new_hye_to_node[assignment].data -= norm.data\n new_hye_to_node[assignment].data = np.clip(\n new_hye_to_node[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX\n )\n\n for assignment in range(self.K):\n self.log_hye_to_node[assignment].data[non_dropout_mask] = new_hye_to_node[\n assignment\n ].data\n\n logging.debug(f\"\\t\\tUpdated hye to node:\\n{self.log_hye_to_node[0].data[:5]}\")\n\n # Update marginals.\n new_marginals = []\n for assignment in range(self.K):\n col_sum = self.log_hye_to_node[assignment].sum(axis=1)\n assert col_sum.shape == (self.N,)\n col_sum += np.log(self.n[assignment]) - self.external_field[assignment]\n new_marginals.append(col_sum)\n new_marginals = np.stack(new_marginals, axis=1)\n assert new_marginals.shape == (self.N, self.K)\n\n new_marginals = new_marginals - special.logsumexp(\n new_marginals, axis=1, keepdims=True\n )\n new_marginals = np.clip(new_marginals, a_min=CLIP_MIN, a_max=CLIP_MAX)\n\n if dropout > 0:\n non_dropout_mask = self.rng.random(self.N) >= dropout\n self.log_marginals[non_dropout_mask] = new_marginals[non_dropout_mask]\n else:\n self.log_marginals = new_marginals\n\n logging.debug(f\"\\t\\tUpdated marginals:\\n{self.log_marginals[:5]}\")\n\n # Update external field.\n lse_term = special.logsumexp(\n a=self.log_marginals.reshape((self.N, self.K, 1)),\n b=self.c.reshape(1, self.K, self.K),\n axis=(0, 1),\n )\n assert lse_term.shape == (self.K,)\n\n C_prime = compute_C_prime(self.max_hye_size)\n self.external_field = C_prime / self.N * np.exp(lse_term)\n logging.debug(f\"\\t\\tUpdated external field:\\n{self.external_field}\")\n\n def updated_community_prior(self) -> np.ndarray:\n \"\"\"Parameter updates for the community priors n during EM inference.\n\n Returns\n -------\n The updated array of community priors.\n \"\"\"\n assignments = self.community_assignments()\n comm, counts = np.unique(assignments, return_counts=True)\n\n n = np.zeros(self.K)\n n[comm] = counts / self.N\n return np.clip(n, a_min=1.0e-20, a_max=1.0)\n\n def updated_affinity_matrix(self, hypergraph: IncidenceHypergraph) -> np.ndarray:\n \"\"\"Parameter updates for the affinity matrix p during EM inference.\n\n Parameters\n ----------\n hypergraph: a hypergraph.\n\n Returns\n -------\n The updated affinity matrix.\n \"\"\"\n # Numerator.\n pi, interactions = self.hye_pi(hypergraph, return_interactions=True)\n numerator = np.tensordot(\n interactions, 1 / np.clip(pi, a_min=1.0e-20, a_max=None), axes=(0, 0)\n )\n assert numerator.shape == (self.K, self.K)\n\n # Denominator.\n C_prime = compute_C_prime(self.max_hye_size)\n denominator = (\n self.N * C_prime * (self.N * np.outer(self.n, self.n) - 
np.diag(self.n))\n )\n\n p = self.p * 2 * numerator / denominator\n return np.clip(p, a_min=1e-20, a_max=0.99)\n\n def community_assignments(self):\n marginals = self.log_marginals\n return np.argmax(marginals, axis=1)\n\n def compute_external_field(self) -> np.array:\n r\"\"\"Compute the approximate external field, defined as\n .. math::\n h(t_i) :=\n \\frac{C'}{N}\n \\sum_{j \\in V} \\sum_{t_j} c_{t_i t_j} q_j(t_j)\n where\n .. math::\n C' = \\sum_{d=2}^D \\binom{N-2}{d-2} \\frac{1}{\\kappa_d}\n\n Returns\n -------\n The external field h.\n \"\"\"\n log_marginals = self.log_marginals\n c = self.c\n K = self.K\n N = self.N\n C_prime = compute_C_prime(self.max_hye_size)\n\n external_field = special.logsumexp(\n a=log_marginals.reshape(N, 1, K), b=c.reshape(1, K, K), axis=(0, 2)\n )\n assert external_field.shape == (K,)\n return C_prime / N * np.exp(external_field)\n\n def single_hye_pi(self, assignments: Iterable[int]) -> float:\n r\"\"\"Compute the hyperedge unnormalized probability.\n For a hyperedge e and community assignments t, the unnormalized probability is\n given by\n .. math::\n \\pi_e := \\sum_{i < j \\in e} p_{t_i t_j}\n\n Parameters\n ----------\n assignments: community assignments.\n This array contains the community assignments :math::`t_i` (with values\n between 0 and K-1, where K is the number of communities) for all nodes i in\n the hyperedge.\n\n Returns\n -------\n The value of :math::`\\pi_e`.\n \"\"\"\n K = self.K\n hye_comm_counts = [0] * K\n counts = Counter(assignments)\n for comm, count in counts.items():\n hye_comm_counts[comm] = count\n\n return hyperedge_pi(hye_comm_counts, self.p)\n\n def hye_pi(\n self, hypergraph: IncidenceHypergraph, return_interactions: bool = False\n ) -> np.ndarray | tuple[np.ndarray, np.ndarray]:\n r\"\"\"Compute the hyperedge unnormalized probabilities for all the hyperedges in\n the hypergraph. For a hyperedge e, the unnormalized probability has form\n .. math::\n \\pi_e := \\sum_{i <j \\in e} p_{t_i t_j}\n with p affinity matrix and :math::`t_i` community assignment of node i.\n\n Parameters\n ----------\n hypergraph: the input hypergraph.\n return_interactions: whether to optionally return the tensor of community\n interactions within hyperedges, defined as, for any hyperedge e and\n communities a, b:\n .. math::\n \\#_{ab}^{(e)} := \\sum_{i <j \\in e} \\delta_{t_i a} \\delta_{t_j b}\n where :math::`\\delta_{xy}` is the Dirac delta, equal to 1 if :math::`x=y`,\n else 0.\n The tensor :math::`\\#` has shape (E, K, K), with E number of hyperedges and\n K number of communities.\n Returns\n -------\n The array of :math::`\\pi_e` values. 
Optionally, the tensor of :math::`\\#`\n values.\n \"\"\"\n E = hypergraph.E\n K = self.K\n p = self.p\n incidence = hypergraph.get_binary_incidence_matrix()\n\n onehot_assignments = np.zeros((self.N, K))\n onehot_assignments[np.arange(self.N), self.community_assignments()] = 1\n\n counts = incidence.transpose() @ onehot_assignments\n assert counts.shape == (E, K)\n del onehot_assignments\n\n interactions = counts.reshape(E, 1, K) * counts.reshape(E, K, 1)\n interactions[:, np.arange(K), np.arange(K)] = counts * (counts - 1) / 2\n assert interactions.shape == (E, K, K)\n del counts\n\n pi = 0.5 * (\n np.sum(interactions * p.reshape(1, K, K), axis=(1, 2))\n + np.inner(interactions[:, np.arange(K), np.arange(K)], np.diagonal(p))\n )\n\n if return_interactions:\n return pi, interactions\n return pi\n\n def free_energy(self, hypergraph: IncidenceHypergraph) -> float:\n \"\"\"Compute the free energy of a hypergraph utilizing the message passing\n cavity approximations. The free energy, often denoted as :math::`F = -log Z`,\n corresponds to the negative log-normalizing constant of the Boltzmann\n distribution. Z is also called the evidence of the probabilistic model.\n\n Parameters\n ----------\n hypergraph: hypergraph.\n\n Returns\n -------\n The log-likelihood value.\n \"\"\"\n self._check_hypergraph_vs_model_params(hypergraph)\n K = self.K\n N = self.N\n external_field = self.compute_external_field()\n ones = np.ones(hypergraph.E)\n log_marginals = self.log_marginals\n hye_dims = hypergraph.get_binary_incidence_matrix().sum(axis=0)\n\n # Node-related addends.\n f_i = [\n x.tocsc().dot(ones) - external_field[k]\n for k, x in enumerate(\n compute_psi_dynamic_programming(hypergraph=hypergraph, model=self)\n )\n ]\n assert len(f_i) == K\n assert all(x.shape == (N,) for x in f_i)\n f_i = np.vstack(f_i).T\n assert f_i.shape == (N, K)\n f_i = special.logsumexp(a=f_i, b=self.n.reshape(1, -1), axis=1)\n f_i_sum = f_i.sum()\n\n # Edge-related addends.\n # First addend.\n first_addend = compute_psi_tilde_dynamic_programming(\n hypergraph=hypergraph, model=self\n )\n first_addend = ((hye_dims - 1) * first_addend).sum()\n\n # Second addend.\n log_marginal_sum = special.logsumexp(log_marginals, axis=0)\n cross_log_marginal_sum = log_marginal_sum.reshape(\n (1, K)\n ) + log_marginal_sum.reshape((K, 1))\n assert cross_log_marginal_sum.shape == (K, K)\n\n cross_log_marginals = log_marginals.reshape((N, 1, K)) + log_marginals.reshape(\n (N, K, 1)\n )\n assert cross_log_marginals.shape == (N, K, K)\n cross_log_marginals = special.logsumexp(cross_log_marginals, axis=0)\n\n second_addend = special.logsumexp(\n a=np.hstack([cross_log_marginal_sum, cross_log_marginals]),\n b=np.hstack([self.c, -self.c]),\n )\n second_addend = np.exp(second_addend)\n second_addend *= compute_C_third(self.max_hye_size) / (2 * N)\n\n f_e_sum = first_addend + second_addend\n\n return -f_i_sum + f_e_sum\n\n @staticmethod\n def _check_params(\n n: np.ndarray, p: np.ndarray, K: int, N: int, max_hye_size: int | None\n ) -> None:\n \"\"\"Check the correctness of the initialization parameters.\"\"\"\n # Check coherence between n and p.\n if n is not None:\n if not np.allclose(n.sum(), 1):\n raise ValueError(\n \"The prior parameters n for the community distribution do not \"\n \"sum to 1.\"\n )\n if np.any(n < 0):\n raise ValueError(\n \"The prior parameters n for the community distribution contain \"\n \"negative values.\"\n )\n if len(n.shape) != 1:\n raise ValueError(\n \"The array of prior parameters n is not one-dimensional.\"\n )\n if 
n.shape != (K,):\n raise ValueError(\n \"The array of prior parameters n has dimension different from the \"\n \"number of communities K.\"\n )\n\n if p is not None:\n if not np.all(p == p.T):\n raise ValueError(\"The probability matrix p is not symmetric.\")\n\n if np.any(p > 1) or np.any(p < 0):\n raise ValueError(\n \"The probability matrix p contains values outside \"\n \"the (0, 1) interval.\"\n )\n\n if p.shape != (K, K):\n raise ValueError(\"The matrix p has shape different from (K, K).\")\n\n if p is not None and n is not None:\n if not p.shape == (K, K):\n raise ValueError(\n \"The shapes of n and p do not match. They need to be respectively \"\n \"(K,) and (K, K) for some integer K.\"\n )\n\n # Check coherence between N and max_hye_size.\n if max_hye_size is not None and max_hye_size < 2:\n raise ValueError(\"The max_hye_size cannot be lower than 2.\")\n\n if max_hye_size is not None and max_hye_size > N:\n raise ValueError(\n \"max_hye_size cannot be higher than the number of nodes N.\"\n )\n\n def _check_hypergraph_vs_model_params(\n self, hypergraph: IncidenceHypergraph\n ) -> None:\n \"\"\"Check that the model parameters are coherent with an input hypergraph.\"\"\"\n if hypergraph.N != self.N:\n raise ValueError(\n \"The input hypergraph has a different number of nodes \"\n \"than the value specified for the model.\"\n )\n\n if hypergraph.max_hye_size > self.max_hye_size:\n raise ValueError(\n \"The input hypergraph contains hyperedges bigger than the max_hye_size \"\n \"specified in the model.\"\n )\n\n def _random_init_n(self) -> None:\n \"\"\"Random initialization of the community priors n.\"\"\"\n self.n = self.rng.dirichlet(alpha=[100] * self.K)\n\n def _random_init_p(self) -> None:\n \"\"\"Random initialization of the affinity matrix p.\"\"\"\n K = self.K\n N = self.N\n\n p = np.ones((K, K)) / (10 * (K - 1))\n p += self.rng.random((K, K)) / 50\n p = np.triu(p, 1) + np.triu(p, 1).T\n np.fill_diagonal(p, 1.0 + self.rng.random(K) / 50)\n p /= N\n p = np.clip(p, a_min=1e-10, a_max=1.0)\n\n self.p = p\n\n def _init_message_passing(\n self,\n hypergraph: IncidenceHypergraph,\n dirichlet_alpha: float = 10.0,\n ) -> None:\n r\"\"\"Random initialization of the messages, marginal beliefs, and external field.\n The initialization is performed to respect the fixed-point conditions given by\n the message passing equations.\n\n Parameters\n ----------\n hypergraph: a hypergraph.\n dirichlet_alpha: parameter to initialize the messages and marginal beliefs.\n These are drawn from a Dirichlet distribution with a uniform parameter array\n :math::`(\\alpha, \\ldots, \\alpha)` with length the number of communities.\n \"\"\"\n incidence = hypergraph.get_binary_incidence_matrix()\n\n def random_prob_init():\n beliefs = [incidence.copy().astype(float) for _ in range(self.K)]\n vals = self.rng.dirichlet(\n [dirichlet_alpha] * len(beliefs), size=len(beliefs[0].data)\n )\n for i, belief in enumerate(beliefs):\n belief.data *= vals[:, i]\n\n return beliefs\n\n # Random initialization of messages from nodes to hyperedges.\n log_node_to_hye = random_prob_init()\n for belief in log_node_to_hye:\n belief.data = np.log(belief.data)\n self.log_node_to_hye = [TYPE_NODE_TO_HYE(mat) for mat in log_node_to_hye]\n\n # Random initialization of the marginal beliefs.\n marginals = self.rng.dirichlet([dirichlet_alpha] * self.K, size=self.N)\n assert marginals.shape == (self.N, self.K)\n self.log_marginals = np.log(marginals)\n\n # Compute external field from marginals.\n self.external_field = 
self.compute_external_field()\n\n # Infer hye to node as ratio of marginals and noe to hye\n log_hye_to_node = []\n for assignment in range(self.K):\n log_hye_to_node.append(\n TYPE_HYE_TO_NODE(\n incidence * self.log_marginals[:, assignment].reshape(self.N, 1)\n )\n - self.log_node_to_hye[assignment]\n )\n\n normalizer = sparse_reduce_lse(*log_hye_to_node)\n for assignment in range(self.K):\n log_hye_to_node[assignment].data -= normalizer.data\n self.log_hye_to_node = log_hye_to_node"
}
] | import logging
import random
import sys
import numpy as np
from argparse import ArgumentParser
from pathlib import Path
from sem.str_to_type import none_or_type
from src.data.data_io import load_data
from src.model import dynamic_updates
from src.model.hypergraph_block_model import HypergraphBlockModel | 9,668 | type=none_or_type(int),
default=None,
help=(
"Number of nodes in the configurations hypergraph. Only needed (optionally)"
" when specifying hye_file."
),
)
parser.add_argument(
"--K",
type=int,
help="Number of communities in the model.",
)
parser.add_argument(
"--n",
type=none_or_type(str),
default=None,
help=(
"Prior parameters for the communities of the stochastic block model. "
"This is a path to a file to be opened via numpy.loadtxt. "
"If not provided, the value of n is initialized at random. "
),
)
parser.add_argument(
"--p",
type=none_or_type(str),
default=None,
help=(
"Symmetric matrix of community interaction probabilities. "
"This is a path to a file to be opened via numpy.loadtxt "
"If not provided, the value of p is initialized at random. "
),
)
# Model training.
parser.add_argument(
"--train_rounds",
type=int,
default=1,
help=(
"Train with different various random initializations and "
"choose only the model attaining the best log-likelihood."
),
)
parser.add_argument(
"--em_iter",
type=int,
default=20,
help="Max iterations of the EM procedure.",
)
parser.add_argument(
"--em_thresh",
type=float,
default=1.0e-5,
help=(
"Threshold for the parameter change during EM. The difference is computed "
"with respect to the affinity matrix p and the community prior n."
),
)
parser.add_argument(
"--mp_iter",
type=int,
default=2000,
help="Max iterations of the message passing procedure.",
)
parser.add_argument(
"--mp_thresh",
type=float,
default=1.0e-5,
help=(
"Threshold for the parameter change during message passing. "
"The difference is computed with respect to the log-marginal values."
),
)
parser.add_argument(
"--mp_patience",
type=int,
default=50,
help=(
"Number of consecutive steps where the change in log-marginals is below "
"the mp_thresh before message passing is stopped."
),
)
parser.add_argument(
"--dirichlet_init_alpha",
type=none_or_type(float),
default=None,
help="Dirichlet alpha utilized for the model initialization.",
)
parser.add_argument(
"--dropout",
type=float,
default=0.99,
help="Dropout in the message passing updates.",
)
parser.add_argument(
"--n_jobs",
type=int,
default=-1,
help=(
"Maximum number of parallel jobs. "
"1 means no parallelization, -1 means all the available cores."
),
)
parser.add_argument(
"--seed",
type=none_or_type(int),
default=None,
help="Random seed.",
)
parser.add_argument(
"--logging",
type=str,
default="INFO",
help="Logging level.",
)
args = parser.parse_args()
random.seed(args.seed)
logging.getLogger().setLevel(args.logging.upper())
|
if __name__ == "__main__":
parser = ArgumentParser()
# Data IO.
parser.add_argument(
"--real_dataset",
type=none_or_type(str),
default=None,
help="Name of a real dataset to be loaded.",
)
parser.add_argument(
"--hye_file",
type=none_or_type(str),
default=None,
help=(
"Path to a file containing a list of hyperedges representing a "
"hypergraph.",
),
)
parser.add_argument(
"--pickle_file",
type=none_or_type(str),
default=None,
help="Path to a file containing a pickle serialized hypergraph.",
)
# Data parameters.
parser.add_argument(
"--max_hye_size",
type=none_or_type(int),
default=None,
help=(
"The maximum hyperedge size considered. This value is used to exclude "
"hyperedges in the configurations hypergraph, as well as a parameter of "
"the probabilistic model to compute internal quantities."
),
)
parser.add_argument(
"--save_dir", type=none_or_type(Path), help="Directory where results are saved."
)
# Model parameters.
parser.add_argument(
"--N",
type=none_or_type(int),
default=None,
help=(
"Number of nodes in the configurations hypergraph. Only needed (optionally)"
" when specifying hye_file."
),
)
parser.add_argument(
"--K",
type=int,
help="Number of communities in the model.",
)
parser.add_argument(
"--n",
type=none_or_type(str),
default=None,
help=(
"Prior parameters for the communities of the stochastic block model. "
"This is a path to a file to be opened via numpy.loadtxt. "
"If not provided, the value of n is initialized at random. "
),
)
parser.add_argument(
"--p",
type=none_or_type(str),
default=None,
help=(
"Symmetric matrix of community interaction probabilities. "
"This is a path to a file to be opened via numpy.loadtxt "
"If not provided, the value of p is initialized at random. "
),
)
# Model training.
parser.add_argument(
"--train_rounds",
type=int,
default=1,
help=(
"Train with different various random initializations and "
"choose only the model attaining the best log-likelihood."
),
)
parser.add_argument(
"--em_iter",
type=int,
default=20,
help="Max iterations of the EM procedure.",
)
parser.add_argument(
"--em_thresh",
type=float,
default=1.0e-5,
help=(
"Threshold for the parameter change during EM. The difference is computed "
"with respect to the affinity matrix p and the community prior n."
),
)
parser.add_argument(
"--mp_iter",
type=int,
default=2000,
help="Max iterations of the message passing procedure.",
)
parser.add_argument(
"--mp_thresh",
type=float,
default=1.0e-5,
help=(
"Threshold for the parameter change during message passing. "
"The difference is computed with respect to the log-marginal values."
),
)
parser.add_argument(
"--mp_patience",
type=int,
default=50,
help=(
"Number of consecutive steps where the change in log-marginals is below "
"the mp_thresh before message passing is stopped."
),
)
parser.add_argument(
"--dirichlet_init_alpha",
type=none_or_type(float),
default=None,
help="Dirichlet alpha utilized for the model initialization.",
)
parser.add_argument(
"--dropout",
type=float,
default=0.99,
help="Dropout in the message passing updates.",
)
parser.add_argument(
"--n_jobs",
type=int,
default=-1,
help=(
"Maximum number of parallel jobs. "
"1 means no parallelization, -1 means all the available cores."
),
)
parser.add_argument(
"--seed",
type=none_or_type(int),
default=None,
help="Random seed.",
)
parser.add_argument(
"--logging",
type=str,
default="INFO",
help="Logging level.",
)
args = parser.parse_args()
random.seed(args.seed)
logging.getLogger().setLevel(args.logging.upper())
| hyg = load_data( | 0 | 2023-12-06 22:01:38+00:00 | 12k |
kramerlab/PeerLearning | run_peer.py | [
{
"identifier": "DQNPeer",
"path": "dqn_peer.py",
"snippet": "class DQNPeer(make_peer_class(DQN)):\n \"\"\"\n A DQN version to be used with peer learning. Therefore, it features\n a critic function\n \"\"\"\n def critic(self, observations, actions):\n q_values = self.q_net(observations).reshape(len(actions), -1, 1)\n tmp = q_values[range(len(actions)), actions, :]\n return tmp, tmp # SAC critic outputs multiple values, so this need\n # to do the same\n\n def get_action(self, *args, **kwargs):\n action, _ = super().get_action(*args, **kwargs)\n return action.reshape(-1), _"
},
{
"identifier": "PeerGroup",
"path": "peer.py",
"snippet": "class PeerGroup:\n \"\"\" A group of peers who train together. \"\"\"\n def __init__(self, peers, use_agent_values=False, init_agent_values=200.,\n lr=0.95, switch_ratio=0, use_advantage=False,\n max_peer_epochs=1_000_000_000):\n \"\"\"\n :param peers: An iterable of peer agents\n :param lr: The learning rate for trust and agent values\n :param switch_ratio: switch_ratio == 0 means no switching\n :param use_advantage: use advantage instead of value for AV updates\n \"\"\"\n self.peers = peers\n self.lr = lr\n self.switch_ratio = switch_ratio\n self.active_peer = None # index of currently learning peer\n self.solo_epoch = False\n self.use_advantage = use_advantage\n self.max_peer_epochs = max_peer_epochs\n\n if use_agent_values:\n self.agent_values = np.full(len(peers), init_agent_values,\n dtype=np.float32)\n key = \"agent_values\"\n\n for peer in peers:\n peer.n_peers = len(peers)\n peer.group = self\n\n # setup agent values\n if use_agent_values:\n peer.peer_values[key] = self.agent_values # noqa (Eq. 6)\n peer.peer_value_functions[key] = self._update_agent_values\n\n def _update_agent_values(self, batch_size=10):\n \"\"\" Updates the agent values with samples from the peers' buffers\"\"\"\n targets = np.zeros_like(self.peers, dtype=np.float32)\n counts = np.zeros_like(self.peers, dtype=np.float32)\n\n for peer in self.peers:\n bs = batch_size // len(self.peers)\n # reward, action, peer, new_obs, old_obs\n if peer.buffer is not None:\n batch = peer.buffer.sample(bs)\n if batch is None: # buffer not sufficiently full\n return\n\n obs = np.array([b[3] for b in batch]).reshape(bs, -1)\n v = peer.value(obs)\n\n if self.use_advantage:\n # previous observations\n prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)\n prev_v = peer.value(prev_obs)\n else:\n prev_v = np.zeros_like(v) # no advantage (see Eq. 5)\n\n for i in range(len(batch)): # Eq. 8\n target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]\n counts[batch[i][2]] += 1\n targets[batch[i][2]] += target\n\n # ensure counts are >= 1, don't change these values\n targets[counts == 0] = self.agent_values[counts == 0]\n counts[counts == 0] = 1\n\n targets /= counts\n self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7\n\n def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):\n \"\"\" The outer peer learning routine. \"\"\"\n assert len(callbacks) == len(self.peers)\n # more solo epochs\n boost_single = 0 < self.switch_ratio < 1\n if boost_single:\n self.switch_ratio = 1 / self.switch_ratio\n\n self.solo_epoch = False\n peer_epochs = 0\n for i in range(n_epochs):\n # don't do peer learning forever\n if peer_epochs < self.max_peer_epochs:\n # ratio of 0 never performs a solo episode\n if (i % (1 + self.switch_ratio) == 1) ^ boost_single:\n self.solo_epoch = True\n else:\n peer_epochs += 1\n else: # budget spent\n self.solo_epoch = True\n\n for p, peer, callback in zip(it.count(), self.peers, callbacks):\n self.active_peer = p\n peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,\n callback=callback, tb_log_name=f\"Peer{p}\",\n reset_num_timesteps=False,\n log_interval=None, **kwargs)\n # update epoch for temperature decay\n peer.epoch += 1\n\n self.active_peer = None\n\n def __len__(self):\n return len(self.peers)"
},
{
"identifier": "make_peer_class",
"path": "peer.py",
"snippet": "def make_peer_class(cls: Type[OffPolicyAlgorithm]):\n \"\"\" Creates a mixin with the corresponding algorithm class.\n :param cls: The learning algorithm (needs to have a callable critic).\n :return: The mixed in peer agent class.\n \"\"\"\n\n class Peer(cls, ABC):\n \"\"\" Abstract Peer class\n needs to be mixed with a suitable algorithm. \"\"\"\n def __init__(self, temperature, temp_decay, algo_args, env,\n use_trust=False, use_critic=False, init_trust_values=200,\n buffer_size=1000, follow_steps=10, seed=None,\n use_trust_buffer=True, solo_training=False,\n peers_sample_with_noise=False,\n sample_random_actions=False, sample_from_suggestions=True,\n epsilon=0.0, env_args=None, only_follow_peers=False):\n if env_args is None:\n env_args = {}\n super(Peer, self).__init__(**algo_args,\n env=make_env(env, **env_args),\n seed=seed)\n # create noise matrix on the correct device\n if hasattr(self.actor, \"reset_noise\"):\n self.actor.reset_noise(self.env.num_envs)\n\n self.solo_training = solo_training\n self.init_values = dict()\n # store all peer values, e.g., trust and agent values in a dict\n self.peer_values = dict()\n # store corresponding functions as well\n self.peer_value_functions = dict()\n\n self.buffer = SuggestionBuffer(buffer_size)\n self.followed_peer = None\n self.__n_peers = None\n self.group = None\n self.epoch = 0\n\n if sample_random_actions:\n epsilon = 1.0\n\n if not solo_training:\n # all peers suggest without noise\n self.peers_sample_with_noise = peers_sample_with_noise\n # actions are sampled instead of taken greedily\n self.sample_actions = sample_from_suggestions\n self.epsilon = epsilon\n self.use_critic = use_critic\n\n if use_trust:\n self.trust_values = np.array([])\n self.init_values[\"trust\"] = init_trust_values\n self.peer_value_functions[\"trust\"] = self._update_trust\n\n self.use_buffer_for_trust = use_trust_buffer\n\n # sampling parameters\n self.temperature = temperature\n self.temp_decay = temp_decay\n\n self.follow_steps = follow_steps\n self.steps_followed = 0\n\n self.only_follow_peers = only_follow_peers\n\n @property\n def n_peers(self):\n return self.__n_peers\n\n @n_peers.setter\n def n_peers(self, n_peers):\n self.__n_peers = n_peers\n\n # Also reset the trust values\n if \"trust\" in self.init_values.keys():\n self.trust_values = np.full(self.__n_peers,\n self.init_values[\"trust\"],\n dtype=np.float32)\n self.peer_values[\"trust\"] = self.trust_values\n\n def critique(self, observations, actions) -> np.array:\n \"\"\" Evaluates the actions with the critic. \"\"\"\n with torch.no_grad():\n a = torch.as_tensor(actions, device=self.device)\n o = torch.as_tensor(observations, device=self.device)\n\n # Compute the next Q values: min over all critic targets\n q_values = torch.cat(self.critic(o, a), dim=1) # noqa\n q_values, _ = torch.min(q_values, dim=1, keepdim=True)\n return q_values.cpu().numpy()\n\n def get_action(self, obs, deterministic=False):\n \"\"\" The core function of peer learning acquires the suggested\n actions of the peers and chooses one based on the settings. 
\"\"\"\n # follow peer for defined number of steps\n followed_steps = self.steps_followed\n self.steps_followed += 1\n self.steps_followed %= self.follow_steps\n if 0 < followed_steps:\n peer = self.group.peers[self.followed_peer]\n det = (peer != self and not self.peers_sample_with_noise) or \\\n deterministic\n action, _ = peer.policy.predict(obs, deterministic=det)\n return action, None\n\n # get actions\n actions = []\n for peer in self.group.peers:\n # self always uses exploration, the suggestions of the other\n # peers only do if the critic method isn't used.\n det = (peer != self and not self.peers_sample_with_noise) or \\\n deterministic\n action, _ = peer.policy.predict(obs, deterministic=det)\n actions.append(action)\n actions = np.asarray(actions).squeeze(1)\n\n # critic (Eq. 3)\n if self.use_critic:\n observations = np.tile(obs, (self.n_peers, 1))\n q_values = self.critique(observations, actions).reshape(-1)\n self.peer_values['critic'] = q_values # part of Eq. 9\n\n # calculate peer values, e.g., trust and agent values\n values = np.zeros(self.n_peers)\n for key in self.peer_values.keys():\n # part of Eq. 9 incl. Footnote 7\n values += self.__normalize(self.peer_values[key])\n\n if self.sample_actions:\n # sample action from probability distribution (Eq. 2)\n temp = self.temperature * np.exp(-self.temp_decay * self.epoch)\n p = np.exp(values / temp)\n p /= np.sum(p)\n self.followed_peer = np.random.choice(self.n_peers, p=p)\n elif self.only_follow_peers:\n p = np.full(self.n_peers, 1 / (self.n_peers - 1))\n p[self.group.peers.index(self)] = 0\n self.followed_peer = np.random.choice(self.n_peers, p=p)\n else:\n # act (epsilon) greedily\n if np.random.random(1) >= self.epsilon:\n self.followed_peer = np.argmax(values)\n else:\n self.followed_peer = np.random.choice(self.n_peers)\n\n action = actions[self.followed_peer].reshape(1, -1)\n\n return action, None\n\n @staticmethod\n def __normalize(values):\n \"\"\" Normalize the values based on their absolute maximum. \"\"\"\n return values / np.max(np.abs(values))\n\n def value(self, observations) -> np.ndarray:\n \"\"\" Calculates the value of the observations. \"\"\"\n actions, _ = self.policy.predict(observations, False)\n return self.critique(observations, actions)\n\n def _update_trust(self, batch_size=10):\n \"\"\" Updates the trust values with samples from the buffer.\n (Eq. 5 and 8)\n \"\"\"\n if self.use_buffer_for_trust:\n batch = self.buffer.sample(batch_size)\n else:\n batch = self.buffer.latest()\n batch_size = 1\n if batch is None: # buffer not sufficiently full\n return\n\n # next observations\n obs = np.array([b[3] for b in batch]).reshape(batch_size, -1)\n v = self.value(obs)\n\n if self.group.use_advantage:\n # previous observations\n prev_obs = np.array([b[4] for b in batch]).reshape(batch_size,\n -1)\n prev_v = self.value(prev_obs)\n else:\n prev_v = np.zeros_like(v) # no comparison to own act (Eq. 5)\n\n targets = np.zeros(self.n_peers)\n counts = np.zeros(self.n_peers)\n for i in range(batch_size):\n target = (batch[i][0] + self.gamma * v[i]) - prev_v[i] # Eq. 8\n counts[batch[i][2]] += 1\n targets[batch[i][2]] += target\n\n # ensure counts are >= 1, don't change these values\n targets[counts == 0] = self.trust_values[counts == 0]\n counts[counts == 0] = 1\n\n targets /= counts\n # Eq. 4\n self.trust_values += self.group.lr * (targets - self.trust_values)\n\n def _on_step(self):\n \"\"\" Adds updates of the peer values, e.g., trust or agent\n values. 
\"\"\"\n super(Peer, self)._on_step() # noqa\n\n if not self.group.solo_epoch:\n # update values, e.g., trust and agent values after ever step\n for key in self.peer_value_functions.keys():\n self.peer_value_functions[key]()\n\n def _store_transition(self, replay_buffer, buffer_action, new_obs,\n reward, dones, infos):\n \"\"\" Adds suggestion buffer handling. \"\"\"\n\n # get previous observations\n old_obs = self._last_obs\n\n super(Peer, self)._store_transition(replay_buffer, # noqa\n buffer_action, new_obs,\n reward, dones, infos)\n\n if not self.group.solo_epoch:\n # store transition in suggestion buffer as well\n self.buffer.add(reward, buffer_action, self.followed_peer,\n new_obs, old_obs)\n\n def _predict_train(self, observation, state=None,\n episode_start=None, deterministic=False):\n \"\"\" The action selection during training involves the peers. \"\"\"\n if deterministic:\n return self.policy.predict(observation, state=state,\n episode_start=episode_start,\n deterministic=deterministic)\n else:\n return self.get_action(observation)\n\n def learn(self, solo_episode=False, **kwargs):\n \"\"\" Adds action selection with help of peers. \"\"\"\n predict = self.predict # safe for later\n\n # use peer suggestions only when wanted\n if not (self.solo_training or solo_episode):\n self.predict = self._predict_train\n else:\n self.followed_peer = self.group.peers.index(self)\n\n result = super(Peer, self).learn(**kwargs)\n\n self.predict = predict # noqa\n return result\n\n def _excluded_save_params(self):\n \"\"\" Excludes attributes that are functions. Otherwise, the save\n method fails. \"\"\"\n ex_list = super(Peer, self)._excluded_save_params()\n ex_list.extend([\"peer_value_functions\", \"peer_values\",\n \"group\", \"predict\"])\n return ex_list\n\n return Peer"
},
{
"identifier": "PeerEvalCallback",
"path": "callbacks.py",
"snippet": "class PeerEvalCallback(EvalCallback):\n \"\"\"\n Callback to track collective measurements about peers.\n\n .. warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use\n ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param peer_group: The group of peers\n :param eval_env: The environment used for initialization\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the\n callback.\n :param log_path: Path to a folder where the evaluations\n (``evaluations.npz``) will be saved. It will be updated at each\n evaluation.\n :param deterministic: Whether the evaluation should\n use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose:\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has\n not been wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n peer_group: PeerGroup,\n eval_envs: List[Union[gym.Env, VecEnv]],\n n_samples=100,\n **kwargs\n ):\n self.peer_group = peer_group\n self.eval_envs = eval_envs\n self.n_samples = n_samples\n\n self.last_logged_matrix = None\n self.follow_matrix = np.zeros((len(peer_group), len(peer_group)))\n\n self.start_time = time.time()\n\n super().__init__(**kwargs)\n\n def _on_step(self) -> bool:\n self.accumulate_followed_peers() # needs to be done at every step\n\n # log time for debugging etc.\n self.logger.record(\"time/time_elapsed\",\n time.time() - self.start_time,\n exclude=\"tensorboard\")\n\n super()._on_step()\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n if 'agent_values' in self.peer_group.__dict__:\n self.track_agent_values()\n if 'trust_values' in self.peer_group.peers[0].__dict__:\n self.track_trust_values()\n self.track_followed_agent(self.peer_group.active_peer)\n\n peer = self.peer_group.active_peer\n eval_values = {\n f\"Peer{peer}_0/eval/mean_reward\": self.last_mean_reward,\n }\n if peer == len(self.peer_group) - 1:\n eval_values[\"global_step\"] = self.n_calls\n wandb.log(eval_values, commit=True)\n else:\n wandb.log(eval_values, commit=False)\n return True\n\n def track_agent_values(self):\n n_agents = len(self.peer_group.peers)\n for i in range(n_agents):\n agent_value = self.peer_group.agent_values[i]\n wandb.log({'Peer{}_0/eval/agent_value'.format(i): agent_value},\n commit=False)\n return True\n\n def track_trust_values(self):\n peer = self.peer_group.active_peer\n trust_i = self.peer_group.peers[peer].trust_values\n for j, el in np.ndenumerate(trust_i):\n wandb.log({'Peer{}_0/eval/trust_{}'.format(peer, j[0]): el},\n commit=False)\n return True\n\n def accumulate_followed_peers(self):\n peer = self.peer_group.active_peer\n followed_peer = self.peer_group.peers[peer].followed_peer\n if followed_peer is not None:\n self.follow_matrix[peer, followed_peer] += 1\n\n def track_followed_agent(self, active_peer):\n if self.last_logged_matrix is None:\n diff = self.follow_matrix\n else:\n diff = self.follow_matrix - self.last_logged_matrix\n\n for (followed_peer,), count in np.ndenumerate(\n self.follow_matrix[active_peer]):\n wandb.log({'Peer{}_0/eval/follow_count{}'.format(\n active_peer, followed_peer): count}, commit=False)\n # also log difference\n wandb.log({'Peer{}_0/eval/follow_count_{}diff'.format(\n active_peer, followed_peer): diff[active_peer, followed_peer]},\n commit=False)\n self.last_logged_matrix = 
np.copy(self.follow_matrix)\n\n def commit_global_step(self, timesteps):\n if self.peer_group.active_peer == len(self.peer_group) - 1:\n eval_values = {\"global_step\": self.n_calls + self.eval_freq}\n wandb.log(eval_values, commit=True)\n\n self.n_calls += timesteps"
},
{
"identifier": "str2bool",
"path": "utils.py",
"snippet": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')"
},
{
"identifier": "add_default_values_to_parser",
"path": "utils.py",
"snippet": "def add_default_values_to_parser(parser):\n parser.add_argument(\"--job_id\", type=str,\n default=wandb.util.generate_id())\n parser.add_argument(\"--agent-count\", type=int, help=\"Number of agents.\",\n default=4)\n parser.add_argument(\"--device\", type=str, default=\"auto\",\n choices=[\"cpu\", \"cuda\", \"auto\"],\n help=\"Device to use, either 'cpu', 'cuda' for GPU or \"\n \"'auto'.\")\n parser.add_argument(\"--env\", type=str, default=\"HalfCheetahBulletEnv-v0\",\n help=\"OpenAI Gym environment to perform algorithm on.\")\n parser.add_argument(\"--env_args\", action=StoreDictKeyPair,\n nargs='*', metavar=\"KEY=VAL\", default={})\n parser.add_argument(\"--seed\", type=int, default=1,\n help=\"Random seed in [0, 2 ** 32)\")\n parser.add_argument(\"--wandb\", type=str, default='offline',\n choices=[\"online\", \"offline\", \"disabled\"])\n parser.add_argument(\"--discrete-actions\", type=str2bool, nargs=\"?\",\n const=False, default=False)\n parser.add_argument(\"--save-dir\", type=Path,\n default=Path.cwd().joinpath(\"Experiments\"))\n\n # Agents\n agent_parser = parser.add_argument_group(\"Agent\")\n agent_parser.add_argument(\"--mix-agents\", type=str, nargs='*',\n default=[\"SAC\"])\n\n agent_parser.add_argument(\"--net-arch\", type=int, nargs='*',\n action='append')\n agent_parser.add_argument(\"--load_paths\", type=str, nargs='*',\n default=[])\n agent_parser.add_argument(\"--agents_to_store\", type=int, nargs='*',\n default=[])\n\n return parser"
},
{
"identifier": "log_reward_avg_in_wandb",
"path": "utils.py",
"snippet": "def log_reward_avg_in_wandb(callbacks):\n results = []\n for callback in callbacks:\n eval_callback = callback[-1]\n result = eval_callback.evaluations_results\n results.append(np.mean(result))\n wandb.log({'reward_avg': np.mean(results)})"
},
{
"identifier": "add_default_values_to_train_parser",
"path": "utils.py",
"snippet": "def add_default_values_to_train_parser(training_parser):\n training_parser.add_argument(\"--steps\", type=int, default=3_000_000,\n help=\"Total number of time steps to train \"\n \"the agent.\")\n training_parser.add_argument(\"--eval-interval\", type=int,\n default=10_000,\n help=\"Interval in time steps between \"\n \"evaluations.\")\n training_parser.add_argument(\"--n-eval-episodes\", type=int,\n default=10,\n help=\"Number of episodes for each \"\n \"evaluation.\")\n training_parser.add_argument(\"--buffer-size\", type=int,\n default=1_000_000)\n training_parser.add_argument(\"--buffer-start-size\", type=int,\n default=1_000,\n help=\"Minimum replay buffer size before \"\n \"performing gradient updates.\")\n training_parser.add_argument(\"--batch-size\", type=int,\n default=100,\n help=\"Minibatch size\")\n training_parser.add_argument(\"--min-epoch-length\", type=int,\n default=10_000,\n help=\"Minimal length of a training_parser \"\n \"epoch.\")\n training_parser.add_argument(\"--learning_rate\", type=str2func, nargs='*',\n default=[3e-4],\n help='Learning rate for adam optimizer, '\n 'the same learning rate will be used '\n 'for all networks (Q-Values, Actor and '\n 'Value function) it can be a function'\n ' of the current progress remaining '\n '(from 1 to 0)')\n training_parser.add_argument(\"--tau\", type=float, default=0.005)\n training_parser.add_argument(\"--gamma\", type=float, default=0.99)\n training_parser.add_argument(\"--gradient_steps\", type=int,\n default=1)\n training_parser.add_argument(\"--train_freq\", type=int,\n default=1)\n training_parser.add_argument(\"--target_update_interval\", type=int,\n default=1)\n dqn_parser = training_parser.add_argument_group(\"DQN\")\n dqn_parser.add_argument(\"--exploration-fraction\", type=float, default=0.1)\n dqn_parser.add_argument(\"--exploration-final-eps\", type=float,\n default=0.05)\n return training_parser"
},
{
"identifier": "new_random_seed",
"path": "utils.py",
"snippet": "def new_random_seed():\n return np.random.randint(np.iinfo(np.int32).max)"
},
{
"identifier": "make_env",
"path": "utils.py",
"snippet": "def make_env(env_str, n_envs=1, **env_args):\n envs = []\n for _ in range(n_envs):\n def env_func():\n env = Monitor(gym.make(env_str, **env_args))\n env.seed(new_random_seed())\n return env\n\n envs.append(env_func)\n return DummyVecEnv(envs)"
},
{
"identifier": "ControllerArguments",
"path": "utils.py",
"snippet": "class ControllerArguments:\n def __init__(self, number_agents):\n self.number_agents = number_agents\n\n def argument_for_every_agent(self, arguments, i):\n if type(arguments) is list:\n if len(arguments) == 1:\n return arguments[0]\n elif len(arguments) == self.number_agents:\n return arguments[i]\n else:\n raise AssertionError(f'number of arguments ({len(arguments)}) '\n f'has to be 1 or == number of agents '\n f'({self.number_agents}) input is'\n f' {arguments}')\n else:\n raise AssertionError(f'input is not a list input is{arguments} '\n f'{type(arguments)}')"
}
] | import argparse
import datetime
import gym
import wandb
import predefined_agents # noqa: F401
import env as local_envs # noqa: F401
from pathlib import Path
from stable_baselines3 import SAC, TD3
from stable_baselines3.common.utils import set_random_seed, \
update_learning_rate
from wandb.integration.sb3 import WandbCallback
from dqn_peer import DQNPeer
from peer import PeerGroup, make_peer_class
from callbacks import PeerEvalCallback
from utils import str2bool, add_default_values_to_parser, \
log_reward_avg_in_wandb, add_default_values_to_train_parser, \
new_random_seed, make_env, ControllerArguments | 8,226 | option_on = (args.use_trust or args.use_critic or args.use_agent_value)
assert (option_on and args.peer_learning) or not option_on
# create results/experiments folder
time_string = datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S")
unique_dir = f"{time_string}__{args.job_id}"
experiment_folder = args.save_dir.joinpath(args.save_name, unique_dir)
experiment_folder.mkdir(exist_ok=True, parents=True)
str_folder = str(experiment_folder)
print("Experiment folder is", str_folder)
# suppress gym warnings
gym.logger.set_level(level=gym.logger.DISABLED)
# seed everything
set_random_seed(args.seed)
# init wandb
wandb.tensorboard.patch(root_logdir=str_folder)
run = wandb.init(entity="jgu-wandb", config=args.__dict__,
project="peer-learning",
monitor_gym=True, sync_tensorboard=False,
name=f"{args.save_name}__{args.job_id}",
notes=f"Peer Learning with {args.agent_count} agents on "
f"the {args.env.split('-')[0]} environment.",
dir=str_folder, mode=args.wandb)
# initialize peer group
algo_args = []
peer_args = []
for i in range(args.agent_count):
algo_args.append(
dict(policy="MlpPolicy",
verbose=1,
policy_kwargs=dict(
net_arch=CA.argument_for_every_agent(args.net_arch, i)
),
buffer_size=args.buffer_size,
batch_size=args.batch_size,
gamma=args.gamma,
tau=args.tau,
train_freq=args.train_freq,
target_update_interval=args.target_update_interval,
gradient_steps=args.gradient_steps,
learning_starts=args.buffer_start_size,
learning_rate=CA.argument_for_every_agent(args.learning_rate,
i),
tensorboard_log=None,
device=args.device))
peer_args.append(
dict(temperature=CA.argument_for_every_agent(args.T, i),
temp_decay=CA.argument_for_every_agent(args.T_decay, i),
algo_args=algo_args[i],
env=args.env,
env_args=args.env_args,
use_trust=args.use_trust,
use_critic=args.use_critic,
buffer_size=args.trust_buffer_size,
follow_steps=args.follow_steps,
use_trust_buffer=args.use_trust_buffer,
solo_training=not args.peer_learning,
peers_sample_with_noise=args.peers_sample_with_noise,
sample_random_actions=args.sample_random_actions,
init_trust_values=args.init_trust_values,
sample_from_suggestions=args.sample_from_suggestions,
epsilon=args.epsilon,
only_follow_peers=args.only_follow_peers))
# create Peer classes
SACPeer = make_peer_class(SAC)
TD3Peer = make_peer_class(TD3)
# create peers and peer group
peers = []
callbacks = []
eval_envs = []
for i in range(args.agent_count):
args_for_agent = peer_args[i]
agent_algo = CA.argument_for_every_agent(args.mix_agents, i)
if agent_algo == 'SAC':
args_for_agent["algo_args"]["ent_coef"] = "auto"
args_for_agent["algo_args"]["use_sde"] = True
args_for_agent["algo_args"]["policy_kwargs"]["log_std_init"] = -3
peer = SACPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'TD3':
peer = TD3Peer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'DQN':
args_for_agent["algo_args"]["exploration_fraction"] = \
args.exploration_fraction
args_for_agent["algo_args"]["exploration_final_eps"] = \
args.exploration_final_eps
peer = DQNPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo in ['Adversarial', 'Expert']:
class_str = f"predefined_agents." \
f"{args.env.split('-')[0]}{agent_algo}"
peer = eval(class_str)(**args_for_agent, seed=new_random_seed())
else:
raise NotImplementedError(
f"The Agent {agent_algo}"
f" is not implemented")
peers.append(peer)
eval_env = make_env(args.env, args.n_eval_episodes, **args.env_args)
# every agent gets its own callbacks
callbacks.append([WandbCallback(verbose=2)])
eval_envs.append(eval_env)
peer_group = PeerGroup(peers, use_agent_values=args.use_agent_value,
lr=args.trust_lr, switch_ratio=args.switch_ratio,
init_agent_values=args.init_agent_values,
use_advantage=args.use_advantage,
max_peer_epochs=args.max_peer_epochs)
# create callbacks
for i in range(args.agent_count):
|
def add_args():
# create arg parser
parser = argparse.ArgumentParser(description="Peer learning.")
# General
parser.add_argument("--save-name", type=str, default="delete_me")
parser = add_default_values_to_parser(parser)
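    # add_default_values_to_parser contributes the shared options
    # (job id, agent count, device, env, seed, wandb mode, net-arch, ...).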
# Training
training = parser.add_argument_group("Training")
add_default_values_to_train_parser(training)
# Peer Learning
peer_learning = parser.add_argument_group("Peer Learning")
peer_learning.add_argument("--follow-steps", type=int, default=10)
peer_learning.add_argument("--switch-ratio", type=float, default=1,
help="How many times peer training compared to "
"solo training Ratio of peer learning "
"episodes to solo episodes; 0 -> only "
"peer learning episodes."
"ratio 0 {'solo': 0, 'peer': 100}"
"ratio 0.2 {'solo': 83, 'peer': 17}"
"ratio 0.25 {'solo': 80, 'peer': 20}"
"ratio 0.333333 {'solo': 75, 'peer': 25}"
"ratio 0.5 {'solo': 67, 'peer': 33}"
"ratio 1 {'solo': 50, 'peer': 50}"
"ratio 2 {'solo': 33, 'peer': 67}"
"ratio 3 {'solo': 25, 'peer': 75}"
"ratio 4 {'solo': 20, 'peer': 80}"
"ratio 5 {'solo': 17, 'peer': 83}")
peer_learning.add_argument("--peer-learning", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--peers-sample-with-noise", type=str2bool,
nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-agent-value", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-trust", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-trust-buffer", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--trust-buffer-size", type=int, default=1000)
peer_learning.add_argument("--use-critic", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--sample_random_actions", type=str2bool,
nargs="?", const=True, default=False)
peer_learning.add_argument("--trust-lr", type=float, default=0.001)
peer_learning.add_argument("--T", type=float, nargs='*', default=[1])
peer_learning.add_argument("--T-decay", type=float, nargs='*', default=[0])
peer_learning.add_argument("--init-trust-values", type=float, default=200)
peer_learning.add_argument("--init-agent-values", type=float, default=200)
peer_learning.add_argument("--use-advantage", type=str2bool, nargs="?",
const=False, default=False)
peer_learning.add_argument("--sample-from-suggestions", type=str2bool,
nargs="?", const=False, default=False)
peer_learning.add_argument("--epsilon", type=float, default=0.0)
peer_learning.add_argument("--max-peer-epochs", type=int,
default=1_000_000_000)
peer_learning.add_argument("--only-follow-peers", type=str2bool,
nargs="?", const=False, default=False)
return parser
if __name__ == '__main__':
# parse args
arg_parser = add_args()
args = arg_parser.parse_args()
CA = ControllerArguments(args.agent_count)
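    # ControllerArguments.argument_for_every_agent expands per-agent CLI lists:
    # a single value is shared by all agents, otherwise one entry per agent.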
    # if any peer learning strategy is chosen, peer learning itself must be enabled
option_on = (args.use_trust or args.use_critic or args.use_agent_value)
assert (option_on and args.peer_learning) or not option_on
# create results/experiments folder
time_string = datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S")
unique_dir = f"{time_string}__{args.job_id}"
experiment_folder = args.save_dir.joinpath(args.save_name, unique_dir)
experiment_folder.mkdir(exist_ok=True, parents=True)
str_folder = str(experiment_folder)
print("Experiment folder is", str_folder)
# suppress gym warnings
gym.logger.set_level(level=gym.logger.DISABLED)
# seed everything
set_random_seed(args.seed)
# init wandb
wandb.tensorboard.patch(root_logdir=str_folder)
run = wandb.init(entity="jgu-wandb", config=args.__dict__,
project="peer-learning",
monitor_gym=True, sync_tensorboard=False,
name=f"{args.save_name}__{args.job_id}",
notes=f"Peer Learning with {args.agent_count} agents on "
f"the {args.env.split('-')[0]} environment.",
dir=str_folder, mode=args.wandb)
# initialize peer group
algo_args = []
peer_args = []
for i in range(args.agent_count):
algo_args.append(
dict(policy="MlpPolicy",
verbose=1,
policy_kwargs=dict(
net_arch=CA.argument_for_every_agent(args.net_arch, i)
),
buffer_size=args.buffer_size,
batch_size=args.batch_size,
gamma=args.gamma,
tau=args.tau,
train_freq=args.train_freq,
target_update_interval=args.target_update_interval,
gradient_steps=args.gradient_steps,
learning_starts=args.buffer_start_size,
learning_rate=CA.argument_for_every_agent(args.learning_rate,
i),
tensorboard_log=None,
device=args.device))
peer_args.append(
dict(temperature=CA.argument_for_every_agent(args.T, i),
temp_decay=CA.argument_for_every_agent(args.T_decay, i),
algo_args=algo_args[i],
env=args.env,
env_args=args.env_args,
use_trust=args.use_trust,
use_critic=args.use_critic,
buffer_size=args.trust_buffer_size,
follow_steps=args.follow_steps,
use_trust_buffer=args.use_trust_buffer,
solo_training=not args.peer_learning,
peers_sample_with_noise=args.peers_sample_with_noise,
sample_random_actions=args.sample_random_actions,
init_trust_values=args.init_trust_values,
sample_from_suggestions=args.sample_from_suggestions,
epsilon=args.epsilon,
only_follow_peers=args.only_follow_peers))
# create Peer classes
SACPeer = make_peer_class(SAC)
TD3Peer = make_peer_class(TD3)
# create peers and peer group
peers = []
callbacks = []
eval_envs = []
for i in range(args.agent_count):
args_for_agent = peer_args[i]
agent_algo = CA.argument_for_every_agent(args.mix_agents, i)
if agent_algo == 'SAC':
args_for_agent["algo_args"]["ent_coef"] = "auto"
args_for_agent["algo_args"]["use_sde"] = True
args_for_agent["algo_args"]["policy_kwargs"]["log_std_init"] = -3
peer = SACPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'TD3':
peer = TD3Peer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'DQN':
args_for_agent["algo_args"]["exploration_fraction"] = \
args.exploration_fraction
args_for_agent["algo_args"]["exploration_final_eps"] = \
args.exploration_final_eps
peer = DQNPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo in ['Adversarial', 'Expert']:
class_str = f"predefined_agents." \
f"{args.env.split('-')[0]}{agent_algo}"
peer = eval(class_str)(**args_for_agent, seed=new_random_seed())
else:
raise NotImplementedError(
f"The Agent {agent_algo}"
f" is not implemented")
peers.append(peer)
eval_env = make_env(args.env, args.n_eval_episodes, **args.env_args)
# every agent gets its own callbacks
callbacks.append([WandbCallback(verbose=2)])
eval_envs.append(eval_env)
peer_group = PeerGroup(peers, use_agent_values=args.use_agent_value,
lr=args.trust_lr, switch_ratio=args.switch_ratio,
init_agent_values=args.init_agent_values,
use_advantage=args.use_advantage,
max_peer_epochs=args.max_peer_epochs)
# create callbacks
for i in range(args.agent_count): | peer_callback = PeerEvalCallback(eval_env=eval_envs[i], | 3 | 2023-12-13 10:40:55+00:00 | 12k |
ZS-YANG/FemtoDet-v3 | projects/Detic_new/detic/detic.py | [
{
"identifier": "LVISV1Dataset",
"path": "mmdet/datasets/lvis.py",
"snippet": "class LVISV1Dataset(LVISDataset):\n \"\"\"LVIS v1 dataset for detection.\"\"\"\n\n METAINFO = {\n 'classes':\n ('aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',\n 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',\n 'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium',\n 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',\n 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',\n 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',\n 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',\n 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',\n 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',\n 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',\n 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',\n 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',\n 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',\n 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',\n 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',\n 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',\n 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',\n 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',\n 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',\n 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',\n 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',\n 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',\n 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',\n 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',\n 'bottle_opener', 'bouquet', 'bow_(weapon)',\n 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl',\n 'bowler_hat', 'bowling_ball', 'box', 'boxing_glove', 'suspenders',\n 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread',\n 'breechcloth', 'bridal_gown', 'briefcase', 'broccoli', 'broach',\n 'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket',\n 'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train',\n 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed',\n 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butter',\n 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet',\n 'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder',\n 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can',\n 'can_opener', 'candle', 'candle_holder', 'candy_bar', 'candy_cane',\n 'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen',\n 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',\n 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',\n 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',\n 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',\n 'cash_register', 'casserole', 'cassette', 'cast', 'cat',\n 'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery',\n 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',\n 'chalice', 'chandelier', 'chap', 'checkbook', 'checkerboard',\n 'cherry', 'chessboard', 'chicken_(animal)', 'chickpea',\n 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',\n 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',\n 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',\n 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',\n 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',\n 
'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard',\n 'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower',\n 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',\n 'coat_hanger', 'coatrack', 'cock', 'cockroach', 'cocoa_(beverage)',\n 'coconut', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil',\n 'coin', 'colander', 'coleslaw', 'coloring_material',\n 'combination_lock', 'pacifier', 'comic_book', 'compass',\n 'computer_keyboard', 'condiment', 'cone', 'control',\n 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',\n 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',\n 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',\n 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',\n 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',\n 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',\n 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',\n 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',\n 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',\n 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',\n 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',\n 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',\n 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table',\n 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',\n 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',\n 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',\n 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove',\n 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat',\n 'dress_suit', 'dresser', 'drill', 'drone', 'dropper',\n 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',\n 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle',\n 'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg',\n 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair',\n 'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot',\n 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret',\n 'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine',\n 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine',\n 'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug',\n 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod',\n 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', 'flash',\n 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',\n 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',\n 'food_processor', 'football_(American)', 'football_helmet',\n 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',\n 'freshener', 'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge',\n 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose',\n 'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin',\n 'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger',\n 'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove',\n 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart',\n 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater',\n 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',\n 'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun',\n 'hairbrush', 'hairnet', 'hairpin', 'halter_top', 'ham', 'hamburger',\n 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass',\n 'hand_towel', 'handcart', 'handcuff', 'handkerchief', 
'handle',\n 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil',\n 'headband', 'headboard', 'headlight', 'headscarf', 'headset',\n 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',\n 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',\n 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',\n 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',\n 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',\n 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',\n 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',\n 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',\n 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',\n 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',\n 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',\n 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',\n 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',\n 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',\n 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',\n 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade',\n 'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',\n 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',\n 'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat',\n 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',\n 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange',\n 'manger', 'manhole', 'map', 'marker', 'martini', 'mascot',\n 'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)',\n 'matchbox', 'mattress', 'measuring_cup', 'measuring_stick',\n 'meatball', 'medicine', 'melon', 'microphone', 'microscope',\n 'microwave_oven', 'milestone', 'milk', 'milk_can', 'milkshake',\n 'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)',\n 'money', 'monitor_(computer_equipment) computer_monitor', 'monkey',\n 'motor', 'motor_scooter', 'motor_vehicle', 'motorcycle',\n 'mound_(baseball)', 'mouse_(computer_equipment)', 'mousepad',\n 'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument',\n 'nailfile', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle',\n 'nest', 'newspaper', 'newsstand', 'nightshirt',\n 'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook',\n 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',\n 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',\n 'orange_(fruit)', 'orange_juice', 'ostrich', 'ottoman', 'oven',\n 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',\n 'padlock', 'paintbrush', 'painting', 'pajamas', 'palette',\n 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',\n 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',\n 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',\n 'parasol', 'parchment', 'parka', 'parking_meter', 'parrot',\n 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',\n 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',\n 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',\n 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',\n 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',\n 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',\n 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',\n 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',\n 
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',\n 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',\n 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',\n 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',\n 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',\n 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',\n 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot',\n 'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn',\n 'pretzel', 'printer', 'projectile_(weapon)', 'projector', 'propeller',\n 'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin',\n 'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt',\n 'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver',\n 'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry',\n 'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',\n 'recliner', 'record_player', 'reflector', 'remote_control',\n 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',\n 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',\n 'rolling_pin', 'root_beer', 'router_(computer_equipment)',\n 'rubber_band', 'runner_(carpet)', 'plastic_bag',\n 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',\n 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',\n 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',\n 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',\n 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',\n 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',\n 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',\n 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',\n 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',\n 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',\n 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',\n 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',\n 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',\n 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',\n 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',\n 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',\n 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',\n 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',\n 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',\n 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',\n 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',\n 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',\n 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',\n 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew',\n 'stirrer', 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove',\n 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',\n 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',\n 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',\n 'sunglasses', 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants',\n 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit',\n 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',\n 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',\n 'tambourine', 'army_tank', 'tank_(storage_vessel)',\n 'tank_top_(clothing)', 
'tape_(sticky_cloth_or_paper)', 'tape_measure',\n 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',\n 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',\n 'telephone_pole', 'telephoto_lens', 'television_camera',\n 'television_set', 'tennis_ball', 'tennis_racket', 'tequila',\n 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',\n 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer',\n 'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster',\n 'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs',\n 'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover',\n 'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy',\n 'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike',\n 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray',\n 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod',\n 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban',\n 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',\n 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',\n 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',\n 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',\n 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',\n 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',\n 'washbasin', 'automatic_washer', 'watch', 'water_bottle',\n 'water_cooler', 'water_faucet', 'water_heater', 'water_jug',\n 'water_gun', 'water_scooter', 'water_ski', 'water_tower',\n 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',\n 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',\n 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',\n 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',\n 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',\n 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',\n 'yoke_(animal_equipment)', 'zebra', 'zucchini'),\n 'palette':\n None\n }\n\n def load_data_list(self) -> List[dict]:\n \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n Returns:\n List[dict]: A list of annotation.\n \"\"\" # noqa: E501\n try:\n import lvis\n if getattr(lvis, '__version__', '0') >= '10.5.3':\n warnings.warn(\n 'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"', # noqa: E501\n UserWarning)\n from lvis import LVIS\n except ImportError:\n raise ImportError(\n 'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".' # noqa: E501\n )\n with get_local_path(\n self.ann_file, backend_args=self.backend_args) as local_path:\n self.lvis = LVIS(local_path)\n self.cat_ids = self.lvis.get_cat_ids()\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n img_ids = self.lvis.get_img_ids()\n data_list = []\n total_ann_ids = []\n for img_id in img_ids:\n raw_img_info = self.lvis.load_imgs([img_id])[0]\n raw_img_info['img_id'] = img_id\n # coco_url is used in LVISv1 instead of file_name\n # e.g. 
http://images.cocodataset.org/train2017/000000391895.jpg\n # train/val split in specified in url\n raw_img_info['file_name'] = raw_img_info['coco_url'].replace(\n 'http://images.cocodataset.org/', '')\n ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n raw_ann_info = self.lvis.load_anns(ann_ids)\n total_ann_ids.extend(ann_ids)\n parsed_data_info = self.parse_data_info({\n 'raw_ann_info':\n raw_ann_info,\n 'raw_img_info':\n raw_img_info\n })\n data_list.append(parsed_data_info)\n if self.ANN_ID_UNIQUE:\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n del self.lvis\n\n return data_list"
},
{
"identifier": "CascadeRCNN",
"path": "mmdet/models/detectors/cascade_rcnn.py",
"snippet": "class CascadeRCNN(TwoStageDetector):\n r\"\"\"Implementation of `Cascade R-CNN: Delving into High Quality Object\n Detection <https://arxiv.org/abs/1906.09756>`_\"\"\"\n\n def __init__(self,\n backbone: ConfigType,\n neck: OptConfigType = None,\n rpn_head: OptConfigType = None,\n roi_head: OptConfigType = None,\n train_cfg: OptConfigType = None,\n test_cfg: OptConfigType = None,\n data_preprocessor: OptConfigType = None,\n init_cfg: OptMultiConfig = None) -> None:\n super().__init__(\n backbone=backbone,\n neck=neck,\n rpn_head=rpn_head,\n roi_head=roi_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n data_preprocessor=data_preprocessor,\n init_cfg=init_cfg)"
},
{
"identifier": "MODELS",
"path": "mmdet/registry.py",
"snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])"
},
{
"identifier": "SampleList",
"path": "mmdet/structures/det_data_sample.py",
"snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_instances(self, value: InstanceData):\n def gt_instances(self):\n def pred_instances(self) -> InstanceData:\n def pred_instances(self, value: InstanceData):\n def pred_instances(self):\n def pred_track_instances(self) -> InstanceData:\n def pred_track_instances(self, value: InstanceData):\n def pred_track_instances(self):\n def ignored_instances(self) -> InstanceData:\n def ignored_instances(self, value: InstanceData):\n def ignored_instances(self):\n def gt_panoptic_seg(self) -> PixelData:\n def gt_panoptic_seg(self, value: PixelData):\n def gt_panoptic_seg(self):\n def pred_panoptic_seg(self) -> PixelData:\n def pred_panoptic_seg(self, value: PixelData):\n def pred_panoptic_seg(self):\n def gt_sem_seg(self) -> PixelData:\n def gt_sem_seg(self, value: PixelData):\n def gt_sem_seg(self):\n def pred_sem_seg(self) -> PixelData:\n def pred_sem_seg(self, value: PixelData):\n def pred_sem_seg(self):"
}
] | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import clip
from typing import List, Union
from mmengine.logging import print_log
from torch import Tensor
from mmdet.datasets import LVISV1Dataset
from mmdet.models.detectors.cascade_rcnn import CascadeRCNN
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from clip.simple_tokenizer import SimpleTokenizer
from mmdet.datasets import CocoDataset
from mmdet.datasets import CityscapesDataset
from mmdet.datasets import VOCDataset
from mmdet.datasets import OpenImagesDataset
from mmdet.datasets import LVISV1Dataset | 8,249 | # Copyright (c) OpenMMLab. All rights reserved.
class CLIPTextEncoder(nn.Module):
def __init__(self, model_name='ViT-B/32'):
super().__init__()
self.tokenizer = SimpleTokenizer()
pretrained_model, _ = clip.load(model_name, device='cpu')
self.clip = pretrained_model
@property
def device(self):
return self.clip.device
@property
def dtype(self):
return self.clip.dtype
def tokenize(self,
texts: Union[str, List[str]],
context_length: int = 77) -> torch.LongTensor:
if isinstance(texts, str):
texts = [texts]
sot_token = self.tokenizer.encoder['<|startoftext|>']
eot_token = self.tokenizer.encoder['<|endoftext|>']
all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token]
for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
st = torch.randint(len(tokens) - context_length + 1,
(1, ))[0].item()
tokens = tokens[st:st + context_length]
result[i, :len(tokens)] = torch.tensor(tokens)
return result
def forward(self, text):
text = self.tokenize(text)
text_features = self.clip.encode_text(text)
return text_features
def get_class_weight(original_caption, prompt_prefix='a '):
if isinstance(original_caption, str):
if original_caption == 'coco':
class_names = CocoDataset.METAINFO['classes']
elif original_caption == 'cityscapes':
class_names = CityscapesDataset.METAINFO['classes']
elif original_caption == 'voc':
class_names = VOCDataset.METAINFO['classes']
elif original_caption == 'openimages':
class_names = OpenImagesDataset.METAINFO['classes']
elif original_caption == 'lvis':
class_names = LVISV1Dataset.METAINFO['classes']
else:
if not original_caption.endswith('.'):
original_caption = original_caption + ' . '
original_caption = original_caption.split(' . ')
class_names = list(filter(lambda x: len(x) > 0, original_caption))
# for test.py
else:
class_names = list(original_caption)
text_encoder = CLIPTextEncoder()
text_encoder.eval()
texts = [prompt_prefix + x for x in class_names]
print_log(f'Computing text embeddings for {len(class_names)} classes.')
embeddings = text_encoder(texts).detach().permute(1, 0).contiguous().cpu()
return class_names, embeddings
def reset_cls_layer_weight(roi_head, weight):
if type(weight) == str:
print_log(f'Resetting cls_layer_weight from file: {weight}')
zs_weight = torch.tensor(
np.load(weight),
dtype=torch.float32).permute(1, 0).contiguous() # D x C
else:
zs_weight = weight
zs_weight = torch.cat(
[zs_weight, zs_weight.new_zeros(
(zs_weight.shape[0], 1))], dim=1) # D x (C + 1)
zs_weight = F.normalize(zs_weight, p=2, dim=0)
zs_weight = zs_weight.to('cuda')
num_classes = zs_weight.shape[-1]
for bbox_head in roi_head.bbox_head:
bbox_head.num_classes = num_classes
del bbox_head.fc_cls.zs_weight
bbox_head.fc_cls.zs_weight = zs_weight
| # Copyright (c) OpenMMLab. All rights reserved.
class CLIPTextEncoder(nn.Module):
def __init__(self, model_name='ViT-B/32'):
super().__init__()
self.tokenizer = SimpleTokenizer()
pretrained_model, _ = clip.load(model_name, device='cpu')
self.clip = pretrained_model
@property
def device(self):
return self.clip.device
@property
def dtype(self):
return self.clip.dtype
def tokenize(self,
texts: Union[str, List[str]],
context_length: int = 77) -> torch.LongTensor:
if isinstance(texts, str):
texts = [texts]
sot_token = self.tokenizer.encoder['<|startoftext|>']
eot_token = self.tokenizer.encoder['<|endoftext|>']
all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token]
for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
st = torch.randint(len(tokens) - context_length + 1,
(1, ))[0].item()
tokens = tokens[st:st + context_length]
result[i, :len(tokens)] = torch.tensor(tokens)
return result
def forward(self, text):
text = self.tokenize(text)
text_features = self.clip.encode_text(text)
return text_features
def get_class_weight(original_caption, prompt_prefix='a '):
if isinstance(original_caption, str):
if original_caption == 'coco':
class_names = CocoDataset.METAINFO['classes']
elif original_caption == 'cityscapes':
class_names = CityscapesDataset.METAINFO['classes']
elif original_caption == 'voc':
class_names = VOCDataset.METAINFO['classes']
elif original_caption == 'openimages':
class_names = OpenImagesDataset.METAINFO['classes']
elif original_caption == 'lvis':
class_names = LVISV1Dataset.METAINFO['classes']
else:
if not original_caption.endswith('.'):
original_caption = original_caption + ' . '
original_caption = original_caption.split(' . ')
class_names = list(filter(lambda x: len(x) > 0, original_caption))
# for test.py
else:
class_names = list(original_caption)
text_encoder = CLIPTextEncoder()
text_encoder.eval()
texts = [prompt_prefix + x for x in class_names]
print_log(f'Computing text embeddings for {len(class_names)} classes.')
embeddings = text_encoder(texts).detach().permute(1, 0).contiguous().cpu()
return class_names, embeddings
def reset_cls_layer_weight(roi_head, weight):
if type(weight) == str:
print_log(f'Resetting cls_layer_weight from file: {weight}')
zs_weight = torch.tensor(
np.load(weight),
dtype=torch.float32).permute(1, 0).contiguous() # D x C
else:
zs_weight = weight
zs_weight = torch.cat(
[zs_weight, zs_weight.new_zeros(
(zs_weight.shape[0], 1))], dim=1) # D x (C + 1)
zs_weight = F.normalize(zs_weight, p=2, dim=0)
zs_weight = zs_weight.to('cuda')
num_classes = zs_weight.shape[-1]
for bbox_head in roi_head.bbox_head:
bbox_head.num_classes = num_classes
del bbox_head.fc_cls.zs_weight
bbox_head.fc_cls.zs_weight = zs_weight
| @MODELS.register_module() | 2 | 2023-12-11 15:23:03+00:00 | 12k |
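Aside (illustrative only, not part of the dataset record above): the `cropped_code`/`all_code` fields of this record define `CLIPTextEncoder` (a thin wrapper around OpenAI CLIP text encoding), `get_class_weight` (maps a dataset keyword such as 'coco'/'lvis', or a custom ' . '-separated caption, to class names plus CLIP text embeddings), and `reset_cls_layer_weight` (installs those embeddings as zero-shot classifier weights in an RoI head). A minimal, hypothetical usage sketch follows; it assumes the definitions above are in scope, that `clip` and `mmdet` are installed, and that a CUDA device is available (the function hard-codes `.to('cuda')`). The name `detector` is a placeholder.

# Hypothetical usage sketch (not from the original file).
# Dataset keyword -> class names plus CLIP text embeddings; the permute(1, 0)
# above yields shape (D, C), e.g. (512, 80) for ViT-B/32 on COCO.
class_names, zs_weight = get_class_weight('coco', prompt_prefix='a ')

# Custom open-vocabulary categories, separated by ' . ' as in the parsing branch above.
class_names, zs_weight = get_class_weight('red bicycle . street sign . fire hydrant')

# Install the embeddings as zero-shot classifier weights in a detector's RoI head:
# an all-zero column is appended, each class column is L2-normalized, and the
# tensor is moved to CUDA before being attached to every bbox_head.
# reset_cls_layer_weight(detector.roi_head, zs_weight)  # `detector` is a placeholder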
merlresearch/PixPNet | pixpnet/protonets/prp/prp.py | [
{
"identifier": "AdaptiveAvgPool2DWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class AdaptiveAvgPool2DWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, eps):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n\n def configvalues_totensorlist(module, device):\n\n propertynames = [\"output_size\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, tuple):\n v = torch.tensor(v, dtype=torch.int32, device=device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module, x.device)\n epstensor = torch.tensor([eps], dtype=torch.float32, device=x.device)\n ctx.save_for_backward(x, epstensor, *values) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, epstensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"output_size\"]\n # idea: paramsdict={ n: values[i]\n # for i,n in enumerate(propertynames) }\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n eps = epstensor.item()\n\n # class instantiation\n layerclass = torch.nn.AdaptiveAvgPool2d(**paramsdict)\n\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=layerclass, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return R, None, None"
},
{
"identifier": "Conv2DBeta0WrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class Conv2DBeta0WrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, lrpignorebias):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n\n def configvalues_totensorlist(module):\n propertynames = [\"in_channels\", \"out_channels\", \"kernel_size\", \"stride\", \"padding\", \"dilation\", \"groups\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=module.weight.device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=module.weight.device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module)\n\n if module.bias is None:\n bias = None\n else:\n bias = module.bias.data.clone()\n lrpignorebiastensor = torch.tensor([lrpignorebias], dtype=torch.bool, device=module.weight.device)\n ctx.save_for_backward(\n x, module.weight.data.clone(), bias, lrpignorebiastensor, *values\n ) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, conv2dweight, conv2dbias, lrpignorebiastensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"in_channels\", \"out_channels\", \"kernel_size\", \"stride\", \"padding\", \"dilation\", \"groups\"]\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n\n if conv2dbias is None:\n module = nn.Conv2d(**paramsdict, bias=False)\n else:\n module = nn.Conv2d(**paramsdict, bias=True)\n module.bias = torch.nn.Parameter(conv2dbias)\n\n module.weight = torch.nn.Parameter(conv2dweight)\n\n pnconv = PosNegConv(module, ignorebias=lrpignorebiastensor.item())\n\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=pnconv, relevance_output=grad_output[0], eps0=1e-12, eps=0)\n\n return R, None, None"
},
{
"identifier": "CosineDistLRPClass",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class CosineDistLRPClass(torch.autograd.Function):\n @staticmethod\n def forward(ctx, conv_features, model):\n ctx.save_for_backward(conv_features, model.prototype_vectors)\n if VERBOSE:\n print(\"cosine custom forward\")\n\n # An alternative distance metric used in TesNet. Alternative to\n # l2_convolution\n x = F.normalize(conv_features, p=2, dim=1)\n prototype_vectors = F.normalize(model.prototype_vectors, p=2, dim=1)\n similarities = F.conv2d(input=x, weight=prototype_vectors)\n # clip similarities in the range [-1, +1] (numerical error can\n # cause similarities to be outside this range)\n similarities = torch.clamp(similarities, -1, 1)\n distances = 1 - similarities # bounded [0, 2]\n\n similarities = torch.log((distances + 1) / (distances + model.epsilon))\n\n return similarities\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the\n loss with respect to the output, and we need to compute the gradient of\n the loss with respect to the input.\n \"\"\"\n if VERBOSE:\n print(\"cosine custom backward\")\n conv, prototypes = ctx.saved_tensors\n i = conv.shape[2]\n j = conv.shape[3]\n c = conv.shape[1]\n p = prototypes.shape[0]\n\n # Broadcast conv to Nxsize(conv) (No. of prototypes)\n conv = conv.repeat(p, 1, 1, 1) # NP x D x Hz x Wz\n prototype = prototypes.repeat(1, 1, i, j) # P x D x Hz x Wz\n\n conv = conv.squeeze() # think this does nothing\n\n cosine_dists = 1 - F.normalize(prototype, p=2, dim=1) * F.normalize(conv, p=2, dim=1)\n d = 1 / (cosine_dists**2 + 1e-12)\n\n denom = torch.sum(d, dim=1, keepdim=True) + 1e-12\n denom = denom.repeat(1, c, 1, 1) + 1e-12\n R = torch.div(d, denom)\n\n grad_output = grad_output.repeat(c, 1, 1, 1)\n grad_output = grad_output.permute(1, 0, 2, 3)\n\n R = R * grad_output\n\n R = torch.sum(R, dim=0)\n\n R = torch.unsqueeze(R, dim=0)\n\n return R, None, None"
},
{
"identifier": "EltwiseSumStacked2EpsWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class EltwiseSumStacked2EpsWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, stackedx, module, eps):\n epstensor = torch.tensor([eps], dtype=torch.float32, device=stackedx.device)\n ctx.save_for_backward(stackedx, epstensor)\n return module.forward(stackedx)\n\n @staticmethod\n def backward(ctx, grad_output):\n stackedx, epstensor = ctx.saved_tensors\n\n X = stackedx.clone().detach().requires_grad_(True)\n\n eps = epstensor.item()\n\n s2 = SumStacked2().to(X.device)\n Rtmp = lrp_backward(_input=X, layer=s2, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return Rtmp, None, None"
},
{
"identifier": "L2LRPClass",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class L2LRPClass(torch.autograd.Function):\n @staticmethod\n def forward(ctx, conv_features, model):\n # *values unpacks the list\n ctx.save_for_backward(conv_features, model.prototype_vectors)\n if VERBOSE:\n print(\"l2 custom forward\")\n x2 = conv_features**2\n x2_patch_sum = F.conv2d(input=x2, weight=model.ones)\n\n p2 = model.prototype_vectors**2\n p2 = torch.sum(p2, dim=(1, 2, 3))\n # p2 is a vector of shape (num_prototypes,)\n # then we reshape it to (num_prototypes, 1, 1)\n p2_reshape = p2.view(-1, 1, 1)\n\n xp = F.conv2d(input=conv_features, weight=model.prototype_vectors)\n intermediate_result = -2 * xp + p2_reshape # use broadcast\n # x2_patch_sum and intermediate_result are of the same shape\n distances = F.relu(x2_patch_sum + intermediate_result)\n\n similarities = torch.log((distances + 1) / (distances + model.epsilon))\n\n return similarities\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the\n loss with respect to the output, and we need to compute the gradient of\n the loss with respect to the input.\n \"\"\"\n if VERBOSE:\n print(\"l2 custom backward\")\n conv, prototypes = ctx.saved_tensors\n i = conv.shape[2]\n j = conv.shape[3]\n c = conv.shape[1]\n p = prototypes.shape[0]\n\n # Broadcast conv to Nxsize(conv) (No. of prototypes)\n conv = conv.repeat(p, 1, 1, 1)\n prototype = prototypes.repeat(1, 1, i, j)\n\n conv = conv.squeeze()\n\n l2 = (conv - prototype) ** 2\n d = 1 / (l2**2 + 1e-12)\n\n denom = torch.sum(d, dim=1, keepdim=True) + 1e-12\n denom = denom.repeat(1, c, 1, 1) + 1e-12\n R = torch.div(d, denom)\n\n grad_output = grad_output.repeat(c, 1, 1, 1)\n grad_output = grad_output.permute(1, 0, 2, 3)\n\n R = R * grad_output\n\n R = torch.sum(R, dim=0)\n\n R = torch.unsqueeze(R, dim=0)\n\n return R, None, None"
},
{
"identifier": "LinearLayerEpsWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class LinearLayerEpsWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, eps):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward\n method.\n \"\"\"\n\n def configvalues_totensorlist(module):\n\n propertynames = [\"in_features\", \"out_features\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=module.weight.device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple\n # of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=module.weight.device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module)\n epstensor = torch.tensor([eps], dtype=torch.float32, device=x.device)\n\n if module.bias is None:\n bias = None\n else:\n bias = module.bias.data.clone()\n ctx.save_for_backward(x, module.weight.data.clone(), bias, epstensor, *values) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, weight, bias, epstensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"in_features\", \"out_features\"]\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n\n if bias is None:\n module = nn.Linear(**paramsdict, bias=False)\n else:\n module = nn.Linear(**paramsdict, bias=True)\n module.bias = torch.nn.Parameter(bias)\n\n module.weight = torch.nn.Parameter(weight)\n\n eps = epstensor.item()\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=module, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return R, None, None"
},
{
"identifier": "MaxPool2DWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class MaxPool2DWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward\n method.\n \"\"\"\n\n def configvalues_totensorlist(module, device):\n\n propertynames = [\"kernel_size\", \"stride\", \"padding\", \"dilation\", \"return_indices\", \"ceil_mode\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, bool):\n v = torch.tensor([v], dtype=torch.bool, device=device)\n elif isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, bool):\n\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple\n # of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module, x.device)\n ctx.save_for_backward(x, *values) # *values unpacks the list\n\n if VERBOSE:\n print(\"maxpool2d custom forward\")\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"kernel_size\", \"stride\", \"padding\", \"dilation\", \"return_indices\", \"ceil_mode\"]\n # idea: paramsdict={ n: values[i]\n # for i,n in enumerate(propertynames) }\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n paramsdict = tensorlist_todict(values)\n\n layerclass = torch.nn.MaxPool2d(**paramsdict)\n\n X = input_.clone().detach().requires_grad_(True)\n with torch.enable_grad():\n Z = layerclass.forward(X)\n relevance_output_data = grad_output[0].clone().detach().unsqueeze(0)\n Z.backward(relevance_output_data)\n R = X.grad\n\n return R, None"
},
{
"identifier": "ReluWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class ReluWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n # stash module config params and trainable params\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output, None"
},
{
"identifier": "SigmoidWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class SigmoidWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output, None"
},
{
"identifier": "SumStacked2",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class SumStacked2(nn.Module):\n def __init__(self):\n super(SumStacked2, self).__init__()\n\n @staticmethod\n def forward(x): # from X=torch.stack([X0, X1], dim=0)\n assert x.shape[0] == 2\n return torch.sum(x, dim=0)"
},
{
"identifier": "bnafterconv_overwrite_intoconv",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "def bnafterconv_overwrite_intoconv(conv, bn): # after visatt\n\n if VERBOSE:\n print(conv, bn)\n\n assert isinstance(bn, nn.BatchNorm2d)\n assert isinstance(conv, nn.Conv2d)\n\n s = (bn.running_var + bn.eps) ** 0.5\n w = bn.weight\n b = bn.bias\n m = bn.running_mean\n conv.weight = torch.nn.Parameter(conv.weight * (w / s).reshape(-1, 1, 1, 1))\n\n if conv.bias is None:\n conv.bias = torch.nn.Parameter((0 - m) * (w / s) + b)\n else:\n conv.bias = torch.nn.Parameter((conv.bias - m) * (w / s) + b)\n return conv"
},
{
"identifier": "get_lrpwrapperformodule",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "def get_lrpwrapperformodule(module, lrp_params, lrp_layer2method, thisis_inputconv_andiwant_zbeta=False):\n if isinstance(module, nn.ReLU):\n key = \"nn.ReLU\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.Sigmoid):\n key = \"nn.Sigmoid\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.BatchNorm2d):\n\n key = \"nn.BatchNorm2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.Linear):\n\n key = \"nn.Linear\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default linearlayer_eps_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"linear_eps\"])\n\n elif isinstance(module, nn.Conv2d):\n if thisis_inputconv_andiwant_zbeta:\n return Conv2DZBetaWrapperClass(module, lrp_params[\"conv2d_ignorebias\"])\n else:\n key = \"nn.Conv2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\n \"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key\n )\n\n # default conv2d_beta0_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(\n module, autogradfunction=autogradfunction, parameter1=lrp_params[\"conv2d_ignorebias\"]\n )\n\n elif isinstance(module, nn.AdaptiveAvgPool2d):\n\n key = \"nn.AdaptiveAvgPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default adaptiveavgpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"pooling_eps\"])\n\n elif isinstance(module, nn.AvgPool2d):\n\n key = \"nn.AvgPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default adaptiveavgpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n 
return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"pooling_eps\"])\n\n elif isinstance(module, nn.MaxPool2d):\n\n key = \"nn.MaxPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default maxpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, SumStacked2): # resnet specific\n\n key = \"sum_stacked2\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default eltwisesum_stacked2_eps_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"eltwise_eps\"])\n\n elif isinstance(module, ClampLayer): # densenet specific\n\n key = \"clamplayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, TensorBiasedLinearLayer): # densenet specific\n\n key = \"tensorbiased_linearlayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"linear_eps\"])\n\n elif isinstance(module, TensorBiasedConvLayer): # densenet specific\n\n key = \"tensorbiased_convlayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(\n module, autogradfunction=autogradfunction, parameter1=lrp_params[\"conv2d_ignorebias\"]\n )\n\n else:\n key = \"nn.MaxPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default maxpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n print(\"found no lookup for this module:\", module)\n raise LRLookupNotFoundError(\"found no lookup for this module:\", module)"
},
{
"identifier": "resetbn",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "def resetbn(bn):\n assert isinstance(bn, nn.BatchNorm2d)\n\n bnc = copy.deepcopy(bn)\n bnc.reset_parameters()\n\n return bnc"
},
{
"identifier": "BasicBlock",
"path": "pixpnet/protonets/prp/resnet_features.py",
"snippet": "class BasicBlock(nn.Module):\n # class attribute\n expansion = 1\n num_layers = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n # only conv with possibly not 1 stride\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n\n # if stride is not 1 then self.downsample cannot be None\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n # the residual connection\n out += identity\n out = self.relu(out)\n\n return out\n\n def block_conv_info(self):\n block_kernel_sizes = [3, 3]\n block_strides = [self.stride, 1]\n block_paddings = [1, 1]\n\n return block_kernel_sizes, block_strides, block_paddings"
},
{
"identifier": "Bottleneck",
"path": "pixpnet/protonets/prp/resnet_features.py",
"snippet": "class Bottleneck(nn.Module):\n # class attribute\n expansion = 4\n num_layers = 3\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = conv1x1(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n # only conv with possibly not 1 stride\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv1x1(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n\n # if stride is not 1 then self.downsample cannot be None\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n def block_conv_info(self):\n block_kernel_sizes = [1, 3, 1]\n block_strides = [1, self.stride, 1]\n block_paddings = [0, 1, 0]\n\n return block_kernel_sizes, block_strides, block_paddings"
},
{
"identifier": "ResNetFeatures",
"path": "pixpnet/protonets/prp/resnet_features.py",
"snippet": "class ResNetFeatures(nn.Module):\n \"\"\"\n the convolutional layers of ResNet\n the average pooling and final fully convolutional layer is removed\n \"\"\"\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):\n super(ResNetFeatures, self).__init__()\n\n self.inplanes = 64\n\n # the first convolutional layer before the structured sequence of blocks\n # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n # bias=False)\n self.conv1_no_act = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.conv1 = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n # comes from the first conv and the following max pool\n self.kernel_sizes = [7, 3]\n self.strides = [2, 2]\n self.paddings = [3, 1]\n\n # the following layers, each layer is a sequence of blocks\n self.block = block\n self.layers = layers\n self.layer1 = self._make_layer(block=block, planes=64, num_blocks=self.layers[0])\n self.layer2 = self._make_layer(block=block, planes=128, num_blocks=self.layers[1], stride=2)\n self.layer3 = self._make_layer(block=block, planes=256, num_blocks=self.layers[2], stride=2)\n self.layer4 = self._make_layer(block=block, planes=512, num_blocks=self.layers[3], stride=2)\n\n # initialize the parameters\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual\n # block behaves like an identity.\n # This improves the model by 0.2~0.3% according to\n # https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, downsample)]\n # only the first block has downsample that is possibly not None\n\n self.inplanes = planes * block.expansion\n for _ in range(1, num_blocks):\n layers.append(block(self.inplanes, planes))\n\n # keep track of every block's conv size, stride size, and padding size\n for each_block in layers:\n block_kernel_sizes, block_strides, block_paddings = each_block.block_conv_info()\n self.kernel_sizes.extend(block_kernel_sizes)\n self.strides.extend(block_strides)\n self.paddings.extend(block_paddings)\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1_no_act(x)\n x = self.bn1(x)\n x = self.conv1(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n def conv_info(self):\n return self.kernel_sizes, self.strides, self.paddings\n\n def num_layers(self):\n \"\"\"\n the number of conv layers in the network, not counting the number\n of bypass layers\n \"\"\"\n return (\n self.block.num_layers * self.layers[0]\n + self.block.num_layers * self.layers[1]\n + self.block.num_layers * self.layers[2]\n + self.block.num_layers * self.layers[3]\n + 1\n )\n\n def __repr__(self):\n 
template = \"resnet{}_features\"\n return template.format(self.num_layers() + 1)"
}
] | import copy
import torch
from collections import OrderedDict
from torch import nn
from torchvision import datasets
from pixpnet.protonets.prp.lrp_general6 import (
AdaptiveAvgPool2DWrapperFct,
Conv2DBeta0WrapperFct,
CosineDistLRPClass,
EltwiseSumStacked2EpsWrapperFct,
L2LRPClass,
LinearLayerEpsWrapperFct,
MaxPool2DWrapperFct,
ReluWrapperFct,
SigmoidWrapperFct,
SumStacked2,
bnafterconv_overwrite_intoconv,
get_lrpwrapperformodule,
resetbn,
)
from pixpnet.protonets.prp.resnet_features import BasicBlock, Bottleneck, ResNetFeatures | 10,124 | """
Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
Copyright (c) 2022 Srishti Gautam, Marina Hohne, Robert Jenssen, Michael Kampffmeyer
SPDX-License-Identifier: AGPL-3.0-or-later
SPDX-License-Identifier: MIT
"""
def imshow_im(hm, q=100):
hm = hm.squeeze().sum(dim=0).detach()
return hm
# partial replacement of BN, use own classes, no pretrained loading
class TorchModuleNotFoundError(Exception):
pass
class BasicBlockFused(BasicBlock):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlockFused, self).__init__(inplanes, planes, stride, downsample)
# own
self.elt = SumStacked2() # eltwisesum2()
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.elt(torch.stack([out, identity], dim=0)) # self.elt(out,identity)
out = self.relu(out)
return out
| """
Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
Copyright (c) 2022 Srishti Gautam, Marina Hohne, Robert Jenssen, Michael Kampffmeyer
SPDX-License-Identifier: AGPL-3.0-or-later
SPDX-License-Identifier: MIT
"""
def imshow_im(hm, q=100):
hm = hm.squeeze().sum(dim=0).detach()
return hm
# partial replacement of BN, use own classes, no pretrained loading
class TorchModuleNotFoundError(Exception):
pass
class BasicBlockFused(BasicBlock):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlockFused, self).__init__(inplanes, planes, stride, downsample)
# own
self.elt = SumStacked2() # eltwisesum2()
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.elt(torch.stack([out, identity], dim=0)) # self.elt(out,identity)
out = self.relu(out)
return out
| class BottleneckFused(Bottleneck): | 14 | 2023-12-06 23:49:31+00:00 | 12k |
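Aside on the PixPNet snippet above (illustrative only, not part of the record): `BasicBlockFused` routes the residual addition through an explicit `SumStacked2` module rather than the in-place `out += identity` of the stock `BasicBlock`, apparently so the elementwise sum can be wrapped by the LRP machinery (cf. `EltwiseSumStacked2EpsWrapperFct` and `get_lrpwrapperformodule` in the context list). A small self-contained check of that equivalence, mirroring the `SumStacked2` definition quoted in the context snippets:

import torch
from torch import nn

class SumStacked2(nn.Module):
    # mirrors the snippet quoted in the context list above
    @staticmethod
    def forward(x):  # x = torch.stack([x0, x1], dim=0)
        assert x.shape[0] == 2
        return torch.sum(x, dim=0)

out = torch.randn(2, 64, 8, 8)       # block output before the skip connection
identity = torch.randn(2, 64, 8, 8)  # skip-connection tensor

fused = SumStacked2()(torch.stack([out, identity], dim=0))
assert torch.allclose(fused, out + identity)  # same result as the stock residual add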
dvmazur/mixtral-offloading | src/build_model.py | [
{
"identifier": "ExpertCache",
"path": "src/expert_cache.py",
"snippet": "class ExpertCache:\n def __init__(self, make_module: callable, main_size: int, offload_size: int, buffer_size: int):\n \"\"\"Dynamically loads an array of modules with identical hyperparameters\"\"\"\n self.module_type = self.module_size = self.device = None\n self.active = False\n\n self.registered_experts: Dict[ExpertUID, ExpertInfo] = dict()\n\n self.main_modules = [self._check_module(make_module()) for i in range(main_size)]\n self.main_infos: List[Optional[ExpertInfo]] = [None for _ in range(main_size)]\n\n assert self.module_size is not None\n self.offloaded_storages = [\n torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(offload_size)]\n self.offloaded_infos: List[Optional[ExpertInfo]] = [None for _ in range(offload_size)]\n\n # temporary storage to shave off latency\n self.device_expert_buffers = deque([self._check_module(make_module()) for _ in range(buffer_size)])\n self.offloaded_storage_buffers = deque([\n torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(buffer_size)])\n self.group_infos: Dict[int, EvictionGroupInfo] = defaultdict(EvictionGroupInfo)\n\n def _check_module(self, module: MixtralExpertWrapper):\n assert isinstance(module.storage, torch.UntypedStorage)\n if self.module_type is None:\n self.module_type = type(module)\n self.module_size = len(module.storage)\n self.device = module.storage.device\n else:\n assert isinstance(module, self.module_type)\n assert len(module.storage) == self.module_size\n assert module.storage.device == self.device\n return module\n\n def add_expert(self, uid: ExpertUID, module: MixtralExpertWrapper, eviction_group: int = 0,\n offload: Optional[bool] = None):\n \"\"\"Register an expert to the cache and associate it with uid\"\"\"\n assert self.module_type is not None\n assert isinstance(module, self.module_type)\n return self.add_expert_storage(uid, module.storage, eviction_group=eviction_group, offload=offload)\n\n def add_expert_storage(self, uid: ExpertUID, storage: torch.UntypedStorage,\n eviction_group: int = 0, offload: Optional[bool] = None):\n assert uid not in self.registered_experts, f\"expert {uid} already registered\"\n assert isinstance(storage, torch.UntypedStorage)\n assert len(storage) == self.module_size\n\n if offload is None or not offload: # False or None\n for i in range(len(self.main_modules)):\n if self.main_infos[i] is None:\n self.main_modules[i].storage.copy_(storage)\n info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=False, index=i)\n self.registered_experts[uid] = self.main_infos[i] = info\n self.group_infos[eviction_group].add(info)\n return # done allocating; found spot on device\n if offload is None or offload: # True or None\n for i in range(len(self.offloaded_storages)):\n if self.offloaded_infos[i] is None:\n self.offloaded_storages[i].copy_(storage)\n info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=True, index=i)\n self.registered_experts[uid] = self.offloaded_infos[i] = info\n self.group_infos[eviction_group].add(info)\n return # done allocating; found an offloaded spot\n raise ValueError(\"Cache is full\")\n\n def load_experts(\n self, *uids: ExpertUID, unordered: bool = False) -> Iterator[Tuple[ExpertUID, MixtralExpertWrapper]]:\n \"\"\"\n :example:\n >>> for uid, expert in expert_cache.load_experts(*list_of_uids, unordered=True):\n >>> for uid, expert in expert_iter:\n >>> result += expert(x) * get_moe_weight(uid)\n\n :param uids: iterate over the specified expert uids. 
Same uids as in add_expert\n :param unordered: if True, allows cache to iterate experts in arbitrary order\n The order is chosen to minimize the total wait time.\n :returns: an iterator that yields (uid, expert) pairs, only usable inside the for loop\n\n \"\"\"\n assert len(set(uids)) == len(uids)\n assert not self.active, \"already loading experts; buffers are busy\"\n if unordered: # yield non-offloaded experts first\n uids = sorted(uids, key=lambda uid: self.registered_experts[uid].offloaded)\n infos = [self.registered_experts[uid] for uid in uids]\n\n assert len(set(info.eviction_group for info in infos)) == 1, \"experts must be in the same evicton group\"\n eviction_group = self.group_infos[infos[0].eviction_group]\n for info in infos:\n eviction_group.mark_used(info)\n\n try:\n self.active = True\n # save pre-loaded experts before they can be swapped\n pre_loaded_infos = deque([info for info in infos if not info.offloaded])\n pre_loaded_experts = deque([self.main_modules[info.index] for info in pre_loaded_infos])\n\n # begin loading experts into free buffers in background (via non-blocking copy)\n infos_to_load = deque([info for info in infos if info.offloaded])\n infos_in_loading = deque([])\n experts_in_loading = deque([])\n window_size = min(len(self.device_expert_buffers) - 1,\n len(eviction_group.main_infos),\n len(infos_to_load))\n for _ in range(window_size):\n info_to_load = infos_to_load.popleft()\n infos_in_loading.append(info_to_load)\n experts_in_loading.append(\n self._swap(info_to_load, eviction_group.choose_expert_to_evict()))\n\n for info in infos:\n if len(pre_loaded_infos) > 0 and info is pre_loaded_infos[0]:\n pre_loaded_infos.popleft()\n yield (info.uid, pre_loaded_experts.popleft())\n elif len(infos_in_loading) > 0 and info is infos_in_loading[0]:\n infos_in_loading.popleft()\n yield (info.uid, experts_in_loading.popleft())\n if len(infos_to_load) > 0:\n info_to_load = infos_to_load.popleft()\n infos_in_loading.append(info_to_load)\n experts_in_loading.append(\n self._swap(info_to_load, eviction_group.choose_expert_to_evict()))\n else:\n raise RuntimeError(\"internal error: caching algorithm failed\")\n finally:\n self.active = False\n\n def _swap(self, info_to_load: ExpertInfo, info_to_evict: ExpertInfo) -> nn.Module:\n \"\"\"Swap an offloaded expert (info_to_load) with an on-device expert (info_to_evict) return the loaded expert\"\"\"\n assert info_to_load.offloaded and not info_to_evict.offloaded\n assert info_to_load.eviction_group == info_to_evict.eviction_group\n # swap a single on-device expert with a single offloaded expert using buffers for parallelism\n offloaded_storage_buffer = self.offloaded_storage_buffers.popleft()\n device_expert_buffer = self.device_expert_buffers.popleft()\n device_expert_buffer.storage.copy_(self.offloaded_storages[info_to_load.index], non_blocking=True)\n offloaded_storage_buffer.copy_(self.main_modules[info_to_evict.index].storage, non_blocking=True)\n\n self.device_expert_buffers.append(self.main_modules[info_to_evict.index])\n self.main_modules[info_to_evict.index] = device_expert_buffer\n self.offloaded_storage_buffers.append(self.offloaded_storages[info_to_load.index])\n self.offloaded_storages[info_to_load.index] = offloaded_storage_buffer\n\n self.main_infos[info_to_evict.index] = info_to_load\n self.offloaded_infos[info_to_load.index] = info_to_evict\n info_to_evict.offloaded, info_to_load.offloaded = info_to_load.offloaded, info_to_evict.offloaded\n info_to_evict.index, info_to_load.index = info_to_load.index, 
info_to_evict.index\n self.group_infos[info_to_load.eviction_group].swap(info_to_load, info_to_evict)\n return device_expert_buffer"
},
{
"identifier": "MixtralExpertWrapper",
"path": "src/expert_wrapper.py",
"snippet": "class MixtralExpertWrapper(nn.Module):\n def __init__(\n self,\n expert_module: tp.Any,\n device: torch.device,\n ):\n super().__init__()\n \n expert_module, self.storage = self.replace_layer_storage(expert_module, device)\n self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)\n \n self._register_state_dict_hook(self._add_storage_to_state_dict_hook)\n self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)\n \n @staticmethod\n def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):\n state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)\n return state_dict\n \n def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())\n del state_dict[prefix + 'storage']\n \n def forward(self, *args, **kwargs):\n return self.expert_module(*args, **kwargs)\n \n \n @staticmethod\n def replace_layer_storage(\n layer: tp.Any,\n device: torch.device,\n ):\n state_dict = {\n f\"w{i}\": {\n \"W_q\": getattr(layer, f\"w{i}\").W_q,\n \"meta\": getattr(layer, f\"w{i}\").meta,\n \"bias\": getattr(layer, f\"w{i}\").bias,\n }\n for i in range(1, 4)\n }\n\n storage_size = 0\n offsets = [0]\n\n for x in nested_flatten(state_dict):\n if not isinstance(x, torch.Tensor):\n continue\n storage_size += x.nbytes\n offsets.append(storage_size)\n\n storage = torch.UntypedStorage(storage_size, device=device) \n\n i = 0\n new_flattened_states = list()\n for x in nested_flatten(state_dict):\n if not isinstance(x, torch.Tensor):\n new_flattened_states.append(x)\n continue\n\n start = offsets[i]\n end = offsets[i + 1]\n a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)\n a_view[...] = x\n assert a_view.data_ptr() == storage.data_ptr() + start\n i += 1\n new_flattened_states.append(a_view)\n\n state_dict = nested_pack(new_flattened_states, state_dict)\n\n for layer_id, states in state_dict.items():\n patched = getattr(layer, layer_id)\n patched.W_q = states[\"W_q\"]\n patched.meta = states[\"meta\"]\n patched.bias = states[\"bias\"]\n setattr(layer, layer_id, patched)\n\n return layer, storage"
},
{
"identifier": "HQQLinearTritonSavable",
"path": "src/custom_layers.py",
"snippet": "class HQQLinearTritonSavable(HQQLinear):\n def __init__(self, layer, quant_config, meta=None, **kwargs):\n \"\"\"\n Example how to get meta:\n >>>> meta1 = HQQLinearSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)\n >>>> meta2 = HQQLinearSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)\n \"\"\"\n \n assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4]\n \n super().__init__(layer, quant_config, **kwargs)\n \n if not hasattr(self, 'meta'):\n assert meta is not None\n self.meta = copy.deepcopy(meta)\n \n self._register_state_dict_hook(self._add_to_state_dict_hook)\n self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook)\n \n def quantize(self, *args, **kwargs):\n super().quantize(*args, **kwargs)\n \n # repacking\n self.repack()\n \n def repack(self):\n if self.W_q.shape != self.meta['shape']:\n W_q = Quantizer.unpack[self.meta['packing']](self.W_q)\n sh = self.meta['shape']\n W_q = W_q.reshape((-1,) + sh[1:])\n W_q = W_q[:sh[0], ...]\n self.W_q = Quantizer.pack[self.meta['packing']](W_q)\n \n def forward(self, x):\n return self.forward_triton(x)\n \n def set_backend(self, backend):\n pass\n \n @torch.inference_mode()\n def forward_triton(self, x):\n assert self.ready, \"model was not quantized\"\n assert self.meta['axis'] == 0\n\n W_q, meta = self.W_q, self.meta\n\n del_keys = []\n if 'quant_scale' in meta and meta['quant_scale']:\n meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')\n if 'quant_zero' in meta and meta['quant_zero']:\n meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')\n\n K = meta['shape'][1]\n N = meta['shape'][0]\n \n if self.meta['nbits'] == 4:\n fn = triton_matmul4_transpose\n elif self.meta['nbits'] == 3:\n fn = functools.partial(triton_matmul3_transpose, N=N)\n elif self.meta['nbits'] == 2:\n fn = triton_matmul2_transpose\n else:\n raise RuntimeError(f\"nbits == {self.meta['nbits']} isn't yet supported\")\n \n output = fn(\n meta['group_size'], x,\n W_q.view(-1, K),\n meta['scale'].view(-1, K),\n meta['zero'].view(-1, K),\n bias=self.bias if hasattr(self, 'bias') else None,\n )\n\n #Cleanup\n for key in del_keys:\n del meta[key]\n\n return output\n\n # to support .forward_pytorch(...) 
- backward compatibility\n @torch.inference_mode()\n def dequantize(self):\n assert self.ready, \"model was not quantized\"\n W_q, meta = self.W_q, self.meta\n del_keys = []\n if(meta['quant_scale']):\n meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')\n if(meta['quant_zero']):\n meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')\n \n W_q_p = Quantizer.unpack[meta['packing']](W_q).half()\n W_q_p = W_q_p[:meta['shape'][0], ...]\n W_q_p = W_q_p.reshape((meta['group_size'], -1))\n \n if((meta['group_size'] is not None) and (meta['nbits']==3)):\n W_q_p = W_q_p[:meta['group_size']] if (meta['axis']==0) else W_q_p[:,:meta['group_size']]\n W_est = ((W_q_p - meta['zero'])*meta['scale']).reshape(meta['shape']) \n \n #Cleanup\n del W_q_p\n for key in del_keys: del meta[key]\n return W_est\n \n @classmethod\n def get_hqq_meta(cls, linear_shape, quant_config):\n layer = HQQLinear(nn.Linear(*linear_shape, bias=False), quant_config)\n meta = layer.meta\n\n def _remove_tensors_recursive(d):\n keys = list(d.keys())\n\n for k in keys:\n if isinstance(d[k], torch.Tensor):\n del d[k]\n elif isinstance(d[k], dict):\n _remove_tensors_recursive(d[k])\n\n _remove_tensors_recursive(meta)\n\n return meta\n \n @staticmethod\n def _add_to_state_dict_hook(self, state_dict, prefix, local_metadata):\n tensor_paths = self._get_tensor_paths(self.meta)\n assert set(tensor_paths).issubset(\n {'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',\n 'scale', 'zero'}\n )\n \n def _add(name, value):\n state_dict[prefix + name] = value\n \n _add('W_q', self.W_q)\n \n if self.bias is not None:\n _add('bias', self.bias)\n \n if 'meta_scale' in self.meta:\n _add('meta.scale_q', self.meta['scale_q'])\n _add('meta.meta_scale.scale', self.meta['meta_scale']['scale'])\n _add('meta.meta_scale.zero', self.meta['meta_scale']['zero'])\n else:\n _add('meta.scale', self.meta['scale'])\n \n if 'meta_zero' in self.meta:\n _add('meta.zero_q', self.meta['zero_q'])\n _add('meta.meta_zero.scale', self.meta['meta_zero']['scale'])\n _add('meta.meta_zero.zero', self.meta['meta_zero']['zero'])\n else:\n _add('meta.zero', self.meta['zero'])\n \n return state_dict\n \n def _load_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n tensor_paths = [k[len(prefix + 'meta.'):] for k in state_dict.keys() if k.startswith(prefix + 'meta.')]\n assert set(tensor_paths).issubset(\n {'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',\n 'scale', 'zero'}\n )\n \n def _del(name):\n del state_dict[prefix + name]\n def _set(name):\n setattr(self, name, state_dict[prefix + name])\n _del(name)\n def _get(name):\n v = state_dict[prefix + name]\n _del(name)\n return v\n \n _set('W_q')\n if 'bias' in state_dict:\n _set('bias')\n else:\n self.bias = None\n \n if not hasattr(self, 'meta'):\n self.meta = {}\n \n if (prefix + 'meta.meta_scale.scale') in state_dict:\n self.meta['scale_q'] = _get('meta.scale_q')\n self.meta['quant_scale'] = True\n if not 'meta_scale' in self.meta:\n self.meta['meta_scale'] = {}\n self.meta['meta_scale'] |= {\n 'scale': _get('meta.meta_scale.scale'),\n 'zero': _get('meta.meta_scale.zero')\n }\n else:\n self.meta['scale'] = _get('meta.scale')\n if (prefix + 'meta.meta_zero.scale') in state_dict:\n self.meta['zero_q'] = _get('meta.zero_q')\n self.meta['quant_zero'] = True\n if not 'meta_zero' in 
self.meta:\n self.meta['meta_zero'] = {}\n self.meta['meta_zero'] |= {\n 'scale': _get('meta.meta_zero.scale'),\n 'zero': _get('meta.meta_zero.zero')\n }\n else:\n self.meta['zero'] = _get('meta.zero')\n self.ready = True\n \n # self.cuda()\n # self.in_gpu = self.W_q.device.type == 'cuda'\n # assert self.in_gpu\n \n self.repack()\n \n @classmethod\n def _get_tensor_paths(cls, state: Dict[str, Any], prefix=''):\n paths = []\n \n for k, v in state.items():\n if isinstance(v, dict):\n paths += cls._get_tensor_paths(v, prefix=k + '.')\n elif isinstance(v, torch.Tensor):\n paths.append(prefix + k)\n \n return paths\n \n def state_dict(self, *args, **kwargs):\n return nn.Module.state_dict(self, *args, **kwargs)\n \n def load_state_dict(self, *args, **kwargs):\n nn.Module.load_state_dict(self, *args, **kwargs)"
},
{
"identifier": "MixtralBLockSparseTop2MLP_HQQ",
"path": "src/custom_layers.py",
"snippet": "class MixtralBLockSparseTop2MLP_HQQ(nn.Module):\n def __init__(self, config: MixtralConfig, quant_config: Dict[str, Any], meta1, meta2):\n super().__init__()\n \n self.w1 = HQQLinearTritonSavable(None, quant_config, meta1)\n self.w2 = HQQLinearTritonSavable(None, quant_config, meta2)\n self.w3 = HQQLinearTritonSavable(None, quant_config, meta1)\n\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)\n current_hidden_states = self.w2(current_hidden_states)\n return current_hidden_states"
},
{
"identifier": "SparseMoeWrapper",
"path": "src/custom_layers.py",
"snippet": "class SparseMoeWrapper(nn.Module):\n def __init__(self, config, layer_id, gate, expert_cache):\n super().__init__()\n\n self.hidden_dim = config.hidden_size\n self.ffn_dim = config.intermediate_size\n self.num_experts = config.num_local_experts\n self.top_k = config.num_experts_per_tok\n self.layer_id = layer_id\n\n self.gate = gate\n self.experts = expert_cache\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n batch_size, sequence_length, hidden_dim = hidden_states.shape\n hidden_states = hidden_states.view(-1, hidden_dim)\n # router_logits: (batch * sequence_length, n_experts)\n router_logits = self.gate(hidden_states)\n\n routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)\n routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)\n routing_weights /= routing_weights.sum(dim=-1, keepdim=True)\n # we cast back to the input dtype\n routing_weights = routing_weights.to(hidden_states.dtype)\n\n final_hidden_states = torch.zeros(\n (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device\n )\n\n # One hot encode the selected experts to create an expert mask\n # this will be used to easily index which expert is going to be sollicitated\n expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)\n\n active_experts = selected_experts.flatten().unique().tolist()\n\n # Loop over all available experts in the model and perform the computation on each expert\n for (_layer_index, expert_idx), expert_layer in self.experts.load_experts(\n *((self.layer_id, expert_idx) for expert_idx in active_experts), unordered=True):\n idx, top_x = torch.where(expert_mask[expert_idx])\n assert top_x.shape[0] > 0\n\n # in torch it is faster to index using lists than torch tensors\n top_x_list = top_x.tolist()\n idx_list = idx.tolist()\n\n # Index the correct hidden states and compute the expert hidden state for\n # the current expert. We need to make sure to multiply the output hidden\n # states by `routing_weights` on the corresponding tokens (top-1 and top-2)\n current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim)\n current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None]\n\n # However `index_add_` only support torch tensors for indexing so we'll use\n # the `top_x` tensor here.\n final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))\n final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)\n return final_hidden_states, router_logits"
},
{
"identifier": "with_default_dtype",
"path": "src/utils.py",
"snippet": "@contextmanager\ndef with_default_dtype(dtype):\n _dtype_original = torch.get_default_dtype()\n\n try:\n torch.set_default_dtype(dtype)\n yield\n finally:\n torch.set_default_dtype(_dtype_original)"
}
] | import os
import json
import typing as tp
import torch
from functools import cache
from dataclasses import dataclass
from torch import nn
from transformers import AutoConfig
from transformers.models.mixtral import MixtralForCausalLM, MixtralConfig
from safetensors.torch import load_file
from torch import nn
from tqdm.auto import trange
from hqq.core.quantize import BaseQuantizeConfig
from .expert_cache import ExpertCache
from .expert_wrapper import MixtralExpertWrapper
from .custom_layers import (
HQQLinearTritonSavable,
MixtralBLockSparseTop2MLP_HQQ,
SparseMoeWrapper,
)
from .utils import with_default_dtype | 7,444 |
for layer in model.model.layers:
layer.block_sparse_moe.gate = nn.Linear(
config.hidden_size,
config.num_local_experts,
dtype=torch.float16,
device=device,
bias=False,
)
layer.self_attn.q_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
layer.self_attn.k_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.v_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.o_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
@cache
def get_default_ffn_quant_config(ffn_dim: int = 14336, hidden_dim: int = 4096):
quant_config = BaseQuantizeConfig(
nbits=2,
group_size=16,
quant_zero=True,
quant_scale=True,
)
meta1 = HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
meta2 = HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
return quant_config, meta1, meta2
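# Note: the metas returned here are shape-only templates (get_hqq_meta strips all
# tensors), so expert modules can be instantiated before any weights are loaded.
# Hypothetical call: quant_config, meta1, meta2 = get_default_ffn_quant_config()
#   meta1 -> w1/w3 projections (hidden_dim -> ffn_dim), meta2 -> w2 (ffn_dim -> hidden_dim)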
def make_empty_expert(
model_config: MixtralConfig, quant_config: QuantConfig
) -> MixtralBLockSparseTop2MLP_HQQ:
meta1, meta2 = quant_config.get_ffn_metas(
model_config.hidden_size, model_config.intermediate_size
)
return MixtralBLockSparseTop2MLP_HQQ(
model_config,
quant_config.ffn_config,
meta1,
meta2,
)
def make_and_load_expert_wrapper(
config: MixtralConfig,
quant_config: QuantConfig,
states_dir: str,
expert_uid: tuple[int, int],
device: torch.device,
) -> MixtralExpertWrapper:
layer_idx, expert_idx = expert_uid
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.{layer_idx}.block_sparse_moe.experts.{expert_idx}"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
state_dict = load_file(os.path.join(states_dir, state_fpath), device=str(device))
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict, strict=True)
return MixtralExpertWrapper(expert, device)
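# The safetensors index file maps each parameter name to its shard; looking up the
# expert's "...w1.W_q" entry is enough to locate the shard that is expected to hold
# that entire expert, which is then loaded and wrapped for the expert cache.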
def load_00_expert_state_dict(states_dir: str, device: torch.device):
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.0.block_sparse_moe.experts.0"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
return load_file(os.path.join(states_dir, state_fpath), device=str(device))
def build_model(
device: torch.device,
quant_config: QuantConfig,
offload_config: OffloadConfig,
state_path: str,
):
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
state_dict_00 = load_00_expert_state_dict(state_path, device)
def _make_module():
config = AutoConfig.from_pretrained(model_name)
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict_00)
return MixtralExpertWrapper(expert, device=device)
with device, with_default_dtype(torch.float16):
model = MixtralForCausalLM(
AutoConfig.from_pretrained(
model_name,
num_local_experts=0,
torch_dtype=torch.float16,
device_map=device,
),
)
model_config = AutoConfig.from_pretrained(model_name)
replace_attn_layers(model, model_config, quant_config, device)
state_index_path = os.path.join(state_path, "model.safetensors.index.json")
with open(state_index_path) as f:
weight_map = json.load(f)["weight_map"]
trunk_state_path = os.path.join(
state_path,
weight_map["model.embed_tokens.weight"],
)
model.load_state_dict(load_file(trunk_state_path, device=str(device)), strict=True)
|
@dataclass(frozen=True)
class OffloadConfig:
main_size: int
offload_size: int
buffer_size: int
offload_per_layer: int
class QuantConfig:
def __init__(
self,
ffn_config: BaseQuantizeConfig,
attn_config: BaseQuantizeConfig,
):
self.ffn_config = ffn_config
self.attn_config = attn_config
@cache
def get_ffn_metas(self, hidden_dim: int, ffn_dim: int) -> tuple[tp.Any, tp.Any]:
return (
HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), self.ffn_config),
HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), self.ffn_config),
)
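    # functools.cache memoizes the metas per (hidden_dim, ffn_dim), so all experts
    # built from the same QuantConfig instance share one pair of meta dicts.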
def replace_attn_layers(
model: MixtralForCausalLM,
config: MixtralConfig,
quant_config: QuantConfig,
device: torch.device,
) -> None:
attn_quant_config = quant_config.attn_config
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
head_dim = hidden_size // num_heads
num_key_value_heads = config.num_key_value_heads
shapes = [
(hidden_size, num_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(num_heads * head_dim, hidden_size),
]
shape_to_meta = {
shape: HQQLinearTritonSavable.get_hqq_meta(shape, attn_quant_config)
for shape in shapes
}
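    # The HQQ meta is computed once per distinct projection shape and reused for the
    # q/k/v/o replacements in every transformer layer below.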
def patch_fct_hqq(shape, quant_config):
meta = shape_to_meta[shape]
layer = HQQLinearTritonSavable(None, quant_config, meta=meta)
return layer
for layer in model.model.layers:
layer.block_sparse_moe.gate = nn.Linear(
config.hidden_size,
config.num_local_experts,
dtype=torch.float16,
device=device,
bias=False,
)
layer.self_attn.q_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
layer.self_attn.k_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.v_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.o_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
@cache
def get_default_ffn_quant_config(ffn_dim: int = 14336, hidden_dim: int = 4096):
quant_config = BaseQuantizeConfig(
nbits=2,
group_size=16,
quant_zero=True,
quant_scale=True,
)
meta1 = HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
meta2 = HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
return quant_config, meta1, meta2
def make_empty_expert(
model_config: MixtralConfig, quant_config: QuantConfig
) -> MixtralBLockSparseTop2MLP_HQQ:
meta1, meta2 = quant_config.get_ffn_metas(
model_config.hidden_size, model_config.intermediate_size
)
return MixtralBLockSparseTop2MLP_HQQ(
model_config,
quant_config.ffn_config,
meta1,
meta2,
)
def make_and_load_expert_wrapper(
config: MixtralConfig,
quant_config: QuantConfig,
states_dir: str,
expert_uid: tuple[int, int],
device: torch.device,
) -> MixtralExpertWrapper:
layer_idx, expert_idx = expert_uid
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.{layer_idx}.block_sparse_moe.experts.{expert_idx}"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
state_dict = load_file(os.path.join(states_dir, state_fpath), device=str(device))
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict, strict=True)
return MixtralExpertWrapper(expert, device)
def load_00_expert_state_dict(states_dir: str, device: torch.device):
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.0.block_sparse_moe.experts.0"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
return load_file(os.path.join(states_dir, state_fpath), device=str(device))
def build_model(
device: torch.device,
quant_config: QuantConfig,
offload_config: OffloadConfig,
state_path: str,
):
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
state_dict_00 = load_00_expert_state_dict(state_path, device)
def _make_module():
config = AutoConfig.from_pretrained(model_name)
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict_00)
return MixtralExpertWrapper(expert, device=device)
with device, with_default_dtype(torch.float16):
model = MixtralForCausalLM(
AutoConfig.from_pretrained(
model_name,
num_local_experts=0,
torch_dtype=torch.float16,
device_map=device,
),
)
model_config = AutoConfig.from_pretrained(model_name)
replace_attn_layers(model, model_config, quant_config, device)
state_index_path = os.path.join(state_path, "model.safetensors.index.json")
with open(state_index_path) as f:
weight_map = json.load(f)["weight_map"]
trunk_state_path = os.path.join(
state_path,
weight_map["model.embed_tokens.weight"],
)
model.load_state_dict(load_file(trunk_state_path, device=str(device)), strict=True)
| expert_cache = ExpertCache( | 0 | 2023-12-15 03:32:35+00:00 | 12k |
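Taken together, QuantConfig, OffloadConfig and build_model above assemble the quantized Mixtral trunk before the expert cache is attached. The following is a minimal, hypothetical wiring sketch; the quantization values, offload sizes and state_path are assumptions rather than values taken from the repository.

import torch
from hqq.core.quantize import BaseQuantizeConfig

# QuantConfig, OffloadConfig and build_model refer to the definitions shown above.
attn_cfg = BaseQuantizeConfig(nbits=4, group_size=64, quant_zero=True, quant_scale=True)   # assumed values
ffn_cfg = BaseQuantizeConfig(nbits=2, group_size=16, quant_zero=True, quant_scale=True)

quant_config = QuantConfig(ffn_config=ffn_cfg, attn_config=attn_cfg)
offload_config = OffloadConfig(main_size=4, offload_size=4, buffer_size=4, offload_per_layer=4)  # placeholder sizes

model = build_model(
    device=torch.device("cuda:0"),
    quant_config=quant_config,
    offload_config=offload_config,
    state_path="/path/to/mixtral-offloading-state",  # assumed local checkpoint directory
)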
open-mmlab/PIA | app.py | [
{
"identifier": "I2VPipeline",
"path": "animatediff/pipelines/i2v_pipeline.py",
"snippet": "class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n # memory_format: torch.memory_format,\n feature_extractor: CLIPImageProcessor = None,\n image_encoder: CLIPVisionModelWithProjection = None,\n ):\n super().__init__()\n\n if hasattr(scheduler.config, \"steps_offset\") and scheduler.config.steps_offset != 1:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`\"\n f\" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure \"\n \"to update the config accordingly as leaving `steps_offset` might led to incorrect results\"\n \" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,\"\n \" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`\"\n \" file\"\n )\n deprecate(\"steps_offset!=1\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"steps_offset\"] = 1\n scheduler._internal_dict = FrozenDict(new_config)\n\n if hasattr(scheduler.config, \"clip_sample\") and scheduler.config.clip_sample is True:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`.\"\n \" `clip_sample` should be set to False in the configuration file. Please make sure to update the\"\n \" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in\"\n \" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very\"\n \" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file\"\n )\n deprecate(\"clip_sample not set\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"clip_sample\"] = False\n scheduler._internal_dict = FrozenDict(new_config)\n\n is_unet_version_less_0_9_0 = hasattr(unet.config, \"_diffusers_version\") and version.parse(\n version.parse(unet.config._diffusers_version).base_version\n ) < version.parse(\"0.9.0.dev0\")\n is_unet_sample_size_less_64 = hasattr(unet.config, \"sample_size\") and unet.config.sample_size < 64\n if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:\n deprecation_message = (\n \"The configuration file of the unet has set the default `sample_size` to smaller than\"\n \" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the\"\n \" following: \\n- CompVis/stable-diffusion-v1-4 \\n- CompVis/stable-diffusion-v1-3 \\n-\"\n \" CompVis/stable-diffusion-v1-2 \\n- CompVis/stable-diffusion-v1-1 \\n- runwayml/stable-diffusion-v1-5\"\n \" \\n- runwayml/stable-diffusion-inpainting \\n you should change 'sample_size' to 64 in the\"\n \" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`\"\n \" in the config might lead to incorrect results in future versions. 
If you have downloaded this\"\n \" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for\"\n \" the `unet/config.json` file\"\n )\n deprecate(\"sample_size<64\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(unet.config)\n new_config[\"sample_size\"] = 64\n unet._internal_dict = FrozenDict(new_config)\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n image_encoder=image_encoder,\n feature_extractor=feature_extractor,\n scheduler=scheduler,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n # self.memory_format = memory_format\n self.use_ip_adapter = False\n\n @classmethod\n def build_pipeline(cls,\n base_cfg,\n base_model: str,\n unet_path: str,\n dreambooth_path: Optional[str] = None,\n lora_path: Optional[str] = None,\n lora_alpha: float = 0,\n vae_path: Optional[str] = None,\n ip_adapter_path: Optional[str] = None,\n ip_adapter_scale: float = 0.0,\n only_load_vae_decoder: bool = False,\n only_load_vae_encoder: bool = False) -> 'I2VPipeline':\n \"\"\"Method to build pipeline in a faster way~\n Args:\n base_cfg: The config to build model\n base_mode: The model id to initialize StableDiffusion\n unet_path: Path for i2v unet\n\n dreambooth_path: path for dreambooth model\n lora_path: path for lora model\n lora_alpha: value for lora scale\n\n only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt\n and maitain encoder as original.\n\n \"\"\"\n # build unet\n unet = UNet3DConditionModel.from_pretrained_2d(\n base_model, subfolder=\"unet\",\n unet_additional_kwargs=OmegaConf.to_container(\n base_cfg.unet_additional_kwargs))\n\n old_weights = unet.conv_in.weight\n old_bias = unet.conv_in.bias\n new_conv1 = InflatedConv3d(\n 9, old_weights.shape[0],\n kernel_size=unet.conv_in.kernel_size,\n stride=unet.conv_in.stride,\n padding=unet.conv_in.padding,\n bias=True if old_bias is not None else False)\n param = torch.zeros((320,5,3,3),requires_grad=True)\n new_conv1.weight = torch.nn.Parameter(torch.cat((old_weights,param),dim=1))\n if old_bias is not None:\n new_conv1.bias = old_bias\n unet.conv_in = new_conv1\n unet.config[\"in_channels\"] = 9\n\n unet_ckpt = torch.load(unet_path, map_location='cpu')\n unet.load_state_dict(unet_ckpt, strict=False)\n # NOTE: only load temporal layers and condition module\n # for key, value in unet_ckpt.items():\n # if 'motion' in key or 'conv_in' in key:\n # unet.state_dict()[key].copy_(value)\n\n # load vae, tokenizer, text encoder\n vae = AutoencoderKL.from_pretrained(base_model, subfolder=\"vae\")\n tokenizer = CLIPTokenizer.from_pretrained(base_model, subfolder=\"tokenizer\")\n text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder=\"text_encoder\")\n noise_scheduler = DDIMScheduler(**OmegaConf.to_container(base_cfg.noise_scheduler_kwargs))\n\n if dreambooth_path:\n\n print(\" >>> Begin loading DreamBooth >>>\")\n base_model_state_dict = {}\n with safe_open(dreambooth_path, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n base_model_state_dict[key] = f.get_tensor(key)\n\n # load unet\n converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, unet.config)\n\n old_value = converted_unet_checkpoint['conv_in.weight']\n new_param = unet_ckpt['conv_in.weight'][:,4:,:,:].clone().cpu()\n new_value = torch.nn.Parameter(torch.cat((old_value, new_param), dim=1))\n converted_unet_checkpoint['conv_in.weight'] = new_value\n 
unet.load_state_dict(converted_unet_checkpoint, strict=False)\n\n # load vae\n converted_vae_checkpoint = convert_ldm_vae_checkpoint(\n base_model_state_dict, vae.config,\n only_decoder=only_load_vae_decoder,\n only_encoder=only_load_vae_encoder,)\n need_strict = not (only_load_vae_decoder or only_load_vae_encoder)\n vae.load_state_dict(converted_vae_checkpoint, strict=need_strict)\n print('Prefix in loaded VAE checkpoint: ')\n print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()]))\n\n # load text encoder\n text_encoder_checkpoint = convert_ldm_clip_checkpoint(base_model_state_dict)\n if text_encoder_checkpoint:\n text_encoder.load_state_dict(text_encoder_checkpoint, strict=False)\n\n print(\" <<< Loaded DreamBooth <<<\")\n\n if vae_path:\n print(' >>> Begin loading VAE >>>')\n vae_state_dict = {}\n if vae_path.endswith('safetensors'):\n with safe_open(vae_path, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n vae_state_dict[key] = f.get_tensor(key)\n elif vae_path.endswith('ckpt') or vae_path.endswith('pt'):\n vae_state_dict = torch.load(vae_path, map_location='cpu')\n if 'state_dict' in vae_state_dict:\n vae_state_dict = vae_state_dict['state_dict']\n\n vae_state_dict = {f'first_stage_model.{k}': v for k, v in vae_state_dict.items()}\n\n converted_vae_checkpoint = convert_ldm_vae_checkpoint(\n vae_state_dict, vae.config,\n only_decoder=only_load_vae_decoder,\n only_encoder=only_load_vae_encoder,)\n print('Prefix in loaded VAE checkpoint: ')\n print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()]))\n need_strict = not (only_load_vae_decoder or only_load_vae_encoder)\n vae.load_state_dict(converted_vae_checkpoint, strict=need_strict)\n print(\" <<< Loaded VAE <<<\")\n\n if lora_path:\n\n print(\" >>> Begin loading LoRA >>>\")\n\n lora_dict = {}\n with safe_open(lora_path, framework='pt', device='cpu') as file:\n for k in file.keys():\n lora_dict[k] = file.get_tensor(k)\n unet, text_encoder = convert_lora_model_level(\n lora_dict, unet, text_encoder, alpha=lora_alpha)\n\n print(\" <<< Loaded LoRA <<<\")\n\n # move model to device\n device = torch.device('cuda')\n unet_dtype = torch.float16\n tenc_dtype = torch.float16\n vae_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32\n\n unet = unet.to(device=device, dtype=unet_dtype)\n text_encoder = text_encoder.to(device=device, dtype=tenc_dtype)\n vae = vae.to(device=device, dtype=vae_dtype)\n print(f'Set Unet to {unet_dtype}')\n print(f'Set text encoder to {tenc_dtype}')\n print(f'Set vae to {vae_dtype}')\n\n if is_xformers_available():\n unet.enable_xformers_memory_efficient_attention()\n\n pipeline = cls(unet=unet,\n vae=vae,\n tokenizer=tokenizer,\n text_encoder=text_encoder,\n scheduler=noise_scheduler)\n\n # ip_adapter_path = 'h94/IP-Adapter'\n if ip_adapter_path and ip_adapter_scale > 0:\n ip_adapter_name = 'ip-adapter_sd15.bin'\n # only online repo need subfolder\n if not osp.isdir(ip_adapter_path):\n subfolder = 'models'\n else:\n subfolder = ''\n pipeline.load_ip_adapter(ip_adapter_path, subfolder, ip_adapter_name)\n pipeline.set_ip_adapter_scale(ip_adapter_scale)\n pipeline.use_ip_adapter = True\n print(f'Load IP-Adapter, scale: {ip_adapter_scale}')\n\n # text_inversion_path = './models/TextualInversion/easynegative.safetensors'\n # if text_inversion_path:\n # pipeline.load_textual_inversion(text_inversion_path, 'easynegative')\n\n return pipeline\n\n def enable_vae_slicing(self):\n self.vae.enable_slicing()\n\n def disable_vae_slicing(self):\n 
self.vae.disable_slicing()\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @property\n def _execution_device(self):\n if self.device != torch.device(\"meta\") or not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt):\n batch_size = len(prompt) if isinstance(prompt, list) else 1\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):\n removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n text_embeddings = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n text_embeddings = text_embeddings[0]\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1)\n text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = text_input_ids.shape[-1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n uncond_embeddings = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n uncond_embeddings = uncond_embeddings[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1)\n uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n return text_embeddings\n\n def decode_latents(self, latents):\n video_length = latents.shape[2]\n latents = 1 / 0.18215 * latents\n latents = rearrange(latents, \"b c f h w -> (b f) c h w\")\n # video = self.vae.decode(latents).sample\n video = []\n for frame_idx in tqdm(range(latents.shape[0])):\n video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample)\n video = torch.cat(video)\n video = rearrange(video, \"(b f) c h w -> b c f h w\", f=video_length)\n video = (video / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n video = video.cpu().float().numpy()\n return video\n\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(self, prompt, height, width, callback_steps):\n if not isinstance(prompt, str) and not isinstance(prompt, list):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n def get_timesteps(self, num_inference_steps, strength, device):\n # get the original timestep using init_timestep\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n\n t_start = 
max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start:]\n\n return timesteps, num_inference_steps - t_start\n\n def prepare_latents(self, add_noise_time_step, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None):\n shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n if latents is None:\n rand_device = \"cpu\" if device.type == \"mps\" else device\n\n if isinstance(generator, list):\n shape = shape\n # shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)\n else:\n if latents.shape != shape:\n raise ValueError(f\"Unexpected latents shape, got {latents.shape}, expected {shape}\")\n latents = latents.to(device)\n\n return latents\n\n def encode_image(self, image, device, num_images_per_prompt):\n \"\"\"Encode image for ip-adapter. Copied from\n https://github.com/huggingface/diffusers/blob/f9487783228cd500a21555da3346db40e8f05992/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L492-L514 # noqa\n \"\"\"\n dtype = next(self.image_encoder.parameters()).dtype\n\n if not isinstance(image, torch.Tensor):\n image = self.feature_extractor(image, return_tensors=\"pt\").pixel_values\n\n image = image.to(device=device, dtype=dtype)\n image_embeds = self.image_encoder(image).image_embeds\n image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)\n\n uncond_image_embeds = torch.zeros_like(image_embeds)\n return image_embeds, uncond_image_embeds\n\n @torch.no_grad()\n def __call__(\n self,\n image: np.ndarray,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n global_inf_num: int = 0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n\n cond_frame: int = 0,\n mask_sim_template_idx: int = 0,\n ip_adapter_scale: float = 0,\n strength: float = 1,\n progress_fn=None,\n **kwargs,\n ):\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n assert strength > 0 and strength <= 1, (\n f'\"strength\" for img2vid must in (0, 1]. But receive {strength}.')\n\n # Check inputs. 
Raise error if not correct\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n\n if negative_prompt is None:\n negative_prompt = DEFAULT_N_PROMPT\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size\n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n #timesteps = self.scheduler.timesteps\n timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)\n latent_timestep = timesteps[:1].repeat(batch_size)\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n latent_timestep,\n batch_size * num_videos_per_prompt,\n 4,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n\n shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)\n\n raw_image = image.copy()\n image = torch.from_numpy(image)[None, ...].permute(0, 3, 1, 2)\n image = image / 255 # [0, 1]\n image = image * 2 - 1 # [-1, 1]\n image = image.to(device=device, dtype=self.vae.dtype)\n\n if isinstance(generator, list):\n image_latent = [\n self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size)\n ]\n image_latent = torch.cat(image_latent, dim=0)\n else:\n image_latent = self.vae.encode(image).latent_dist.sample(generator)\n\n image_latent = image_latent.to(device=device, dtype=self.unet.dtype)\n image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]])\n image_latent_padding = image_latent.clone() * 0.18215\n mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device=device, dtype=self.unet.dtype)\n\n # prepare mask\n mask_coef = prepare_mask_coef_by_statistics(video_length, cond_frame, mask_sim_template_idx)\n\n masked_image = torch.zeros(shape[0], 4, shape[2], shape[3], shape[4]).to(device=device, dtype=self.unet.dtype)\n for f in range(video_length):\n mask[:,:,f,:,:] = mask_coef[f]\n masked_image[:,:,f,:,:] = image_latent_padding.clone()\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask\n masked_image = torch.cat([masked_image] * 2) if do_classifier_free_guidance else masked_image\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n\n # prepare for ip-adapter\n if self.use_ip_adapter:\n image_embeds, neg_image_embeds = self.encode_image(raw_image, device, num_videos_per_prompt)\n image_embeds = torch.cat([neg_image_embeds, image_embeds])\n image_embeds = image_embeds.to(device=device, dtype=self.unet.dtype)\n\n 
self.set_ip_adapter_scale(ip_adapter_scale)\n print(f'Set IP-Adapter Scale as {ip_adapter_scale}')\n\n else:\n\n image_embeds = None\n\n # prepare for latents if strength < 1, add convert gaussian latent to masked_img and add noise\n if strength < 1:\n noise = torch.randn_like(latents)\n latents = self.scheduler.add_noise(masked_image[0], noise, timesteps[0])\n print(latents.shape)\n\n if progress_fn is None:\n progress_bar = tqdm(timesteps)\n terminal_pbar = None\n else:\n progress_bar = progress_fn.tqdm(timesteps)\n terminal_pbar = tqdm(total=len(timesteps))\n\n # with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(progress_bar):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n mask,\n masked_image,\n t,\n encoder_hidden_states=text_embeddings,\n image_embeds=image_embeds\n )['sample']\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n if terminal_pbar is not None:\n terminal_pbar.update(1)\n\n # Post-processing\n video = self.decode_latents(latents.to(device, dtype=self.vae.dtype))\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n return AnimationPipelineOutput(videos=video)"
},
{
"identifier": "save_videos_grid",
"path": "animatediff/utils/util.py",
"snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = torch.clamp((x * 255), 0, 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)"
}
] | import json
import os
import os.path as osp
import random
import gradio as gr
import numpy as np
import torch
from argparse import ArgumentParser
from datetime import datetime
from glob import glob
from diffusers import DDIMScheduler, EulerDiscreteScheduler, PNDMScheduler
from omegaconf import OmegaConf
from PIL import Image
from animatediff.pipelines import I2VPipeline
from animatediff.utils.util import save_videos_grid | 9,273 | self.basedir, args.save_path, datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S"))
self.savedir_sample = os.path.join(self.savedir, "sample")
os.makedirs(self.savedir, exist_ok=True)
self.stable_diffusion_list = []
self.motion_module_list = []
self.personalized_model_list = []
self.refresh_personalized_model()
self.pipeline = None
self.inference_config = OmegaConf.load(args.config)
self.stable_diffusion_dir = self.inference_config.pretrained_model_path
self.pia_path = self.inference_config.generate.model_path
self.loaded = False
def refresh_personalized_model(self):
personalized_model_list = glob(os.path.join(
self.personalized_model_dir, "*.safetensors"))
self.personalized_model_list = [
os.path.basename(p) for p in personalized_model_list]
def get_ip_apdater_folder(self):
file_list = os.listdir(self.ip_adapter_dir)
if not file_list:
return False
        if 'ip-adapter_sd15.bin' not in file_list:
print('Cannot find "ip-adapter_sd15.bin" '
f'under {self.ip_adapter_dir}')
return False
        if 'image_encoder' not in file_list:
print(f'Cannot find "image_encoder" under {self.ip_adapter_dir}')
return False
return True
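    # Expected local layout: ip_adapter_dir containing ip-adapter_sd15.bin and an
    # image_encoder/ subfolder; if either is missing, load_model falls back to the
    # remote 'h94/IP-Adapter' repository.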
def load_model(self,
dreambooth_path=None,
lora_path=None,
lora_alpha=1.0,
enable_ip_adapter=True):
gr.Info('Start Load Models...')
print('Start Load Models...')
if lora_path and lora_path.upper() != 'NONE':
lora_path = osp.join(self.personalized_model_dir, lora_path)
else:
lora_path = None
if dreambooth_path and dreambooth_path.upper() != 'NONE':
dreambooth_path = osp.join(
self.personalized_model_dir, dreambooth_path)
else:
dreambooth_path = None
if enable_ip_adapter:
if not self.get_ip_apdater_folder():
print('Load IP-Adapter from remote.')
ip_adapter_path = 'h94/IP-Adapter'
else:
ip_adapter_path = self.ip_adapter_dir
else:
ip_adapter_path = None
self.pipeline = I2VPipeline.build_pipeline(
self.inference_config,
self.stable_diffusion_dir,
unet_path=self.pia_path,
dreambooth_path=dreambooth_path,
lora_path=lora_path,
lora_alpha=lora_alpha,
ip_adapter_path=ip_adapter_path)
gr.Info('Load Finish!')
print('Load Finish!')
self.loaded = True
return 'Load'
def animate(
self,
init_img,
motion_scale,
prompt_textbox,
negative_prompt_textbox,
sampler_dropdown,
sample_step_slider,
length_slider,
cfg_scale_slider,
seed_textbox,
ip_adapter_scale,
max_size,
progress=gr.Progress(),
):
if not self.loaded:
            raise gr.Error("Please load the model first!")
if seed_textbox != -1 and seed_textbox != "":
torch.manual_seed(int(seed_textbox))
else:
torch.seed()
seed = torch.initial_seed()
init_img, h, w = preprocess_img(init_img, max_size)
sample = self.pipeline(
image=init_img,
prompt=prompt_textbox,
negative_prompt=negative_prompt_textbox,
num_inference_steps=sample_step_slider,
guidance_scale=cfg_scale_slider,
width=w,
height=h,
video_length=16,
mask_sim_template_idx=motion_scale,
ip_adapter_scale=ip_adapter_scale,
progress_fn=progress,
).videos
save_sample_path = os.path.join(
self.savedir_sample, f"{sample_idx}.mp4")
|
sample_idx = 0
scheduler_dict = {
"DDIM": DDIMScheduler,
"Euler": EulerDiscreteScheduler,
"PNDM": PNDMScheduler,
}
css = """
.toolbutton {
margin-buttom: 0em 0em 0em 0em;
max-width: 2.5em;
min-width: 2.5em !important;
height: 2.5em;
}
"""
parser = ArgumentParser()
parser.add_argument('--config', type=str, default='example/config/base.yaml')
parser.add_argument('--server-name', type=str, default='0.0.0.0')
parser.add_argument('--port', type=int, default=7860)
parser.add_argument('--share', action='store_true')
parser.add_argument('--save-path', default='samples')
args = parser.parse_args()
N_PROMPT = ('wrong white balance, dark, sketches,worst quality,low quality, '
'deformed, distorted, disfigured, bad eyes, wrong lips, '
'weird mouth, bad teeth, mutated hands and fingers, bad anatomy,'
'wrong anatomy, amputation, extra limb, missing limb, '
'floating,limbs, disconnected limbs, mutation, ugly, disgusting, '
'bad_pictures, negative_hand-neg')
def preprocess_img(img_np, max_size: int = 512):
ori_image = Image.fromarray(img_np).convert('RGB')
width, height = ori_image.size
long_edge = max(width, height)
if long_edge > max_size:
scale_factor = max_size / long_edge
else:
scale_factor = 1
width = int(width * scale_factor)
height = int(height * scale_factor)
ori_image = ori_image.resize((width, height))
if (width % 8 != 0) or (height % 8 != 0):
in_width = (width // 8) * 8
in_height = (height // 8) * 8
else:
in_width = width
in_height = height
in_image = ori_image
in_image = ori_image.resize((in_width, in_height))
in_image_np = np.array(in_image)
return in_image_np, in_height, in_width
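# Example: a 1920x1080 input with max_size=512 is scaled by 512/1920 to 512x288;
# both sides are already multiples of 8, matching the 8x VAE downsampling that the
# pipeline's size check requires.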
class AnimateController:
def __init__(self):
# config dirs
self.basedir = os.getcwd()
self.personalized_model_dir = os.path.join(
self.basedir, "models", "DreamBooth_LoRA")
self.ip_adapter_dir = os.path.join(
self.basedir, "models", "IP_Adapter")
self.savedir = os.path.join(
self.basedir, args.save_path, datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S"))
self.savedir_sample = os.path.join(self.savedir, "sample")
os.makedirs(self.savedir, exist_ok=True)
self.stable_diffusion_list = []
self.motion_module_list = []
self.personalized_model_list = []
self.refresh_personalized_model()
self.pipeline = None
self.inference_config = OmegaConf.load(args.config)
self.stable_diffusion_dir = self.inference_config.pretrained_model_path
self.pia_path = self.inference_config.generate.model_path
self.loaded = False
def refresh_personalized_model(self):
personalized_model_list = glob(os.path.join(
self.personalized_model_dir, "*.safetensors"))
self.personalized_model_list = [
os.path.basename(p) for p in personalized_model_list]
def get_ip_apdater_folder(self):
file_list = os.listdir(self.ip_adapter_dir)
if not file_list:
return False
        if 'ip-adapter_sd15.bin' not in file_list:
print('Cannot find "ip-adapter_sd15.bin" '
f'under {self.ip_adapter_dir}')
return False
        if 'image_encoder' not in file_list:
print(f'Cannot find "image_encoder" under {self.ip_adapter_dir}')
return False
return True
def load_model(self,
dreambooth_path=None,
lora_path=None,
lora_alpha=1.0,
enable_ip_adapter=True):
gr.Info('Start Load Models...')
print('Start Load Models...')
if lora_path and lora_path.upper() != 'NONE':
lora_path = osp.join(self.personalized_model_dir, lora_path)
else:
lora_path = None
if dreambooth_path and dreambooth_path.upper() != 'NONE':
dreambooth_path = osp.join(
self.personalized_model_dir, dreambooth_path)
else:
dreambooth_path = None
if enable_ip_adapter:
if not self.get_ip_apdater_folder():
print('Load IP-Adapter from remote.')
ip_adapter_path = 'h94/IP-Adapter'
else:
ip_adapter_path = self.ip_adapter_dir
else:
ip_adapter_path = None
self.pipeline = I2VPipeline.build_pipeline(
self.inference_config,
self.stable_diffusion_dir,
unet_path=self.pia_path,
dreambooth_path=dreambooth_path,
lora_path=lora_path,
lora_alpha=lora_alpha,
ip_adapter_path=ip_adapter_path)
gr.Info('Load Finish!')
print('Load Finish!')
self.loaded = True
return 'Load'
def animate(
self,
init_img,
motion_scale,
prompt_textbox,
negative_prompt_textbox,
sampler_dropdown,
sample_step_slider,
length_slider,
cfg_scale_slider,
seed_textbox,
ip_adapter_scale,
max_size,
progress=gr.Progress(),
):
if not self.loaded:
            raise gr.Error("Please load the model first!")
if seed_textbox != -1 and seed_textbox != "":
torch.manual_seed(int(seed_textbox))
else:
torch.seed()
seed = torch.initial_seed()
init_img, h, w = preprocess_img(init_img, max_size)
sample = self.pipeline(
image=init_img,
prompt=prompt_textbox,
negative_prompt=negative_prompt_textbox,
num_inference_steps=sample_step_slider,
guidance_scale=cfg_scale_slider,
width=w,
height=h,
video_length=16,
mask_sim_template_idx=motion_scale,
ip_adapter_scale=ip_adapter_scale,
progress_fn=progress,
).videos
save_sample_path = os.path.join(
self.savedir_sample, f"{sample_idx}.mp4") | save_videos_grid(sample, save_sample_path) | 1 | 2023-12-21 03:29:34+00:00 | 12k |
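As a rough, untested illustration of how the pieces above fit together outside the Gradio UI, one might drive AnimateController directly. Every argument value below is a placeholder, the required checkpoints must already be downloaded, and the part of animate after the excerpt (saving and returning the video) is not shown above.

import numpy as np
from PIL import Image

controller = AnimateController()          # class defined above
controller.load_model(dreambooth_path=None, lora_path=None, lora_alpha=1.0)

init_img = np.array(Image.open("example.jpg").convert("RGB"))  # placeholder input image
result = controller.animate(
    init_img,
    motion_scale=1,                       # forwarded as mask_sim_template_idx
    prompt_textbox="a boat sailing on the sea, best quality",
    negative_prompt_textbox=N_PROMPT,
    sampler_dropdown="DDIM",
    sample_step_slider=25,
    length_slider=16,
    cfg_scale_slider=7.5,
    seed_textbox=42,
    ip_adapter_scale=0.0,
    max_size=512,
)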
xinghaochen/TinySAM | tinysam/hierarchical_mask_generator.py | [
{
"identifier": "Sam",
"path": "tinysam/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "SamPredictor",
"path": "tinysam/predictor.py",
"snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n #import pdb; pdb.set_trace()\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form 1xHxW, where\n for SAM, H=W=256.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. 
Masks returned by a previous iteration of the\n predict method do not need further transformation.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None"
},
{
"identifier": "MaskData",
"path": "tinysam/utils/amg.py",
"snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()"
},
{
"identifier": "area_from_rle",
"path": "tinysam/utils/amg.py",
"snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])"
},
{
"identifier": "batch_iterator",
"path": "tinysam/utils/amg.py",
"snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]"
},
{
"identifier": "batched_mask_to_box",
"path": "tinysam/utils/amg.py",
"snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out"
},
{
"identifier": "box_xyxy_to_xywh",
"path": "tinysam/utils/amg.py",
"snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh"
},
{
"identifier": "build_all_layer_point_grids",
"path": "tinysam/utils/amg.py",
"snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer"
},
{
"identifier": "calculate_stability_score",
"path": "tinysam/utils/amg.py",
"snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions"
},
{
"identifier": "coco_encode_rle",
"path": "tinysam/utils/amg.py",
"snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle"
},
{
"identifier": "generate_crop_boxes",
"path": "tinysam/utils/amg.py",
"snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs"
},
{
"identifier": "is_box_near_crop_edge",
"path": "tinysam/utils/amg.py",
"snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)"
},
{
"identifier": "mask_to_rle_pytorch",
"path": "tinysam/utils/amg.py",
"snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out"
},
{
"identifier": "remove_small_regions",
"path": "tinysam/utils/amg.py",
"snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True"
},
{
"identifier": "rle_to_mask",
"path": "tinysam/utils/amg.py",
"snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order"
},
{
"identifier": "uncrop_boxes_xyxy",
"path": "tinysam/utils/amg.py",
"snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset"
},
{
"identifier": "uncrop_masks",
"path": "tinysam/utils/amg.py",
"snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)"
},
{
"identifier": "uncrop_points",
"path": "tinysam/utils/amg.py",
"snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset"
}
] | import numpy as np
import torch
import cv2 # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
from pycocotools import mask as mask_utils # type: ignore # noqa: F401 | 10,399 | self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
if min_mask_region_area > 0:
self.predictor = SamPredictor(model)
self.points_per_side = points_per_side
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.high_score_thresh = high_score_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
def set_point_grids(self, point_grids):
self.point_grids = point_grids
def set_points_per_side(self, points_per_side):
self.point_grids = build_all_layer_point_grids(
points_per_side,
0,
1,
)
@torch.no_grad()
def set_image(self, image: np.ndarray) -> MaskData:
# Crop the image and calculate embeddings
self.predictor.set_image(image)
@torch.no_grad()
def hierarchical_generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
self.set_image(image)
self.set_points_per_side(self.points_per_side // 4)
ori_masks, or_results = self.generate(image, True)
ih, iw, _ = image.shape
hstride = ih // self.points_per_side
wstride = iw // self.points_per_side
new_points = []
pass_counter = 0
full_point_grids = np.array(self.point_grids)
for mask in range(full_point_grids.shape[1]):
point_coords = [full_point_grids[0, mask, 0] * iw, full_point_grids[0, mask, 1] * ih]
for sy in [-1, 0, 1]:
for sx in [-1, 0, 1]:
if (sy == 0 and sx == 0) or or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * sx)]:
continue
new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * sx) / ih])
if point_coords[0] + wstride * 2 < iw:
for sx in [-1, 0, 1]:
if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]:
continue
new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih])
if point_coords[1] + hstride * 2 < ih:
for sy in [-1, 0, 1]:
if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]:
continue
new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih])
if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih:
if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]:
continue
new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih])
self.set_point_grids([np.array(new_points)])
new_masks = self.generate(image, False)
new_masks.cat(ori_masks)
new_masks = self.post_process(image, new_masks)
return new_masks
@torch.no_grad()
def generate(self, image: np.ndarray, need_high: bool) -> MaskData:
orig_size = image.shape[:2]
# Get points for this crop
points_scale = np.array(orig_size)[None, ::-1]
points_for_image = self.point_grids[0] * points_scale
# Generate masks for this crop in batches
data = MaskData()
for (points,) in batch_iterator(self.points_per_batch, points_for_image):
orig_h, orig_w = orig_size
# Run model on this batch
transformed_points = self.predictor.transform.apply_coords(points, orig_size)
in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
masks, iou_preds, _ = self.predictor.predict_torch(
in_points[:, None, :],
in_labels[:, None],
return_logits=True,
)
# Serialize predictions and store in MaskData
batch_data = MaskData(
masks=masks.flatten(0, 1),
iou_preds=iou_preds.flatten(0, 1),
points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
)
del masks
if self.pred_iou_thresh > 0.0:
keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh
batch_data.filter(keep_mask)
# Calculate stability score
| # Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
class SamHierarchicalMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
high_score_thresh: float = 8.5,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
high_score_thresh (float): A filtering threshold in [-inf,inf], to find out
the unmasked area for the next generation.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculated the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
if min_mask_region_area > 0:
self.predictor = SamPredictor(model)
self.points_per_side = points_per_side
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.high_score_thresh = high_score_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
def set_point_grids(self, point_grids):
self.point_grids = point_grids
def set_points_per_side(self, points_per_side):
self.point_grids = build_all_layer_point_grids(
points_per_side,
0,
1,
)
@torch.no_grad()
def set_image(self, image: np.ndarray) -> MaskData:
# Crop the image and calculate embeddings
self.predictor.set_image(image)
@torch.no_grad()
def hierarchical_generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
self.set_image(image)
self.set_points_per_side(self.points_per_side // 4)
ori_masks, or_results = self.generate(image, True)
ih, iw, _ = image.shape
hstride = ih // self.points_per_side
wstride = iw // self.points_per_side
new_points = []
pass_counter = 0
full_point_grids = np.array(self.point_grids)
for mask in range(full_point_grids.shape[1]):
point_coords = [full_point_grids[0, mask, 0] * iw, full_point_grids[0, mask, 1] * ih]
for sy in [-1, 0, 1]:
for sx in [-1, 0, 1]:
if (sy == 0 and sx == 0) or or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * sx)]:
continue
new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * sx) / ih])
if point_coords[0] + wstride * 2 < iw:
for sx in [-1, 0, 1]:
if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]:
continue
new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih])
if point_coords[1] + hstride * 2 < ih:
for sy in [-1, 0, 1]:
if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]:
continue
new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih])
if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih:
if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]:
continue
new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih])
self.set_point_grids([np.array(new_points)])
new_masks = self.generate(image, False)
new_masks.cat(ori_masks)
new_masks = self.post_process(image, new_masks)
return new_masks
@torch.no_grad()
def generate(self, image: np.ndarray, need_high: bool) -> MaskData:
orig_size = image.shape[:2]
# Get points for this crop
points_scale = np.array(orig_size)[None, ::-1]
points_for_image = self.point_grids[0] * points_scale
# Generate masks for this crop in batches
data = MaskData()
for (points,) in batch_iterator(self.points_per_batch, points_for_image):
orig_h, orig_w = orig_size
# Run model on this batch
transformed_points = self.predictor.transform.apply_coords(points, orig_size)
in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
masks, iou_preds, _ = self.predictor.predict_torch(
in_points[:, None, :],
in_labels[:, None],
return_logits=True,
)
# Serialize predictions and store in MaskData
batch_data = MaskData(
masks=masks.flatten(0, 1),
iou_preds=iou_preds.flatten(0, 1),
points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
)
del masks
if self.pred_iou_thresh > 0.0:
keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh
batch_data.filter(keep_mask)
# Calculate stability score | batch_data["stability_score"] = calculate_stability_score( | 8 | 2023-12-19 11:25:54+00:00 | 12k |
dcharatan/pixelsplat | src/model/model_wrapper.py | [
{
"identifier": "get_data_shim",
"path": "src/dataset/data_module.py",
"snippet": "def get_data_shim(encoder: nn.Module) -> DataShim:\n \"\"\"Get functions that modify the batch. It's sometimes necessary to modify batches\n outside the data loader because GPU computations are required to modify the batch or\n because the modification depends on something outside the data loader.\n \"\"\"\n\n shims: list[DataShim] = []\n if hasattr(encoder, \"get_data_shim\"):\n shims.append(encoder.get_data_shim())\n\n def combined_shim(batch):\n for shim in shims:\n batch = shim(batch)\n return batch\n\n return combined_shim"
},
{
"identifier": "BatchedExample",
"path": "src/dataset/types.py",
"snippet": "class BatchedExample(TypedDict, total=False):\n target: BatchedViews\n context: BatchedViews\n scene: list[str]"
},
{
"identifier": "compute_lpips",
"path": "src/evaluation/metrics.py",
"snippet": "@torch.no_grad()\ndef compute_lpips(\n ground_truth: Float[Tensor, \"batch channel height width\"],\n predicted: Float[Tensor, \"batch channel height width\"],\n) -> Float[Tensor, \" batch\"]:\n value = get_lpips(predicted.device).forward(ground_truth, predicted, normalize=True)\n return value[:, 0, 0, 0]"
},
{
"identifier": "compute_psnr",
"path": "src/evaluation/metrics.py",
"snippet": "@torch.no_grad()\ndef compute_psnr(\n ground_truth: Float[Tensor, \"batch channel height width\"],\n predicted: Float[Tensor, \"batch channel height width\"],\n) -> Float[Tensor, \" batch\"]:\n ground_truth = ground_truth.clip(min=0, max=1)\n predicted = predicted.clip(min=0, max=1)\n mse = reduce((ground_truth - predicted) ** 2, \"b c h w -> b\", \"mean\")\n return -10 * mse.log10()"
},
{
"identifier": "compute_ssim",
"path": "src/evaluation/metrics.py",
"snippet": "@torch.no_grad()\ndef compute_ssim(\n ground_truth: Float[Tensor, \"batch channel height width\"],\n predicted: Float[Tensor, \"batch channel height width\"],\n) -> Float[Tensor, \" batch\"]:\n ssim = [\n structural_similarity(\n gt.detach().cpu().numpy(),\n hat.detach().cpu().numpy(),\n win_size=11,\n gaussian_weights=True,\n channel_axis=0,\n data_range=1.0,\n )\n for gt, hat in zip(ground_truth, predicted)\n ]\n return torch.tensor(ssim, dtype=predicted.dtype, device=predicted.device)"
},
{
"identifier": "get_cfg",
"path": "src/global_cfg.py",
"snippet": "def get_cfg() -> DictConfig:\n global cfg\n return cfg"
},
{
"identifier": "Loss",
"path": "src/loss/loss.py",
"snippet": "class Loss(nn.Module, ABC, Generic[T_cfg, T_wrapper]):\n cfg: T_cfg\n name: str\n\n def __init__(self, cfg: T_wrapper) -> None:\n super().__init__()\n\n # Extract the configuration from the wrapper.\n (field,) = fields(type(cfg))\n self.cfg = getattr(cfg, field.name)\n self.name = field.name\n\n @abstractmethod\n def forward(\n self,\n prediction: DecoderOutput,\n batch: BatchedExample,\n gaussians: Gaussians,\n global_step: int,\n ) -> Float[Tensor, \"\"]:\n pass"
},
{
"identifier": "Benchmarker",
"path": "src/misc/benchmarker.py",
"snippet": "class Benchmarker:\n def __init__(self):\n self.execution_times = defaultdict(list)\n\n @contextmanager\n def time(self, tag: str, num_calls: int = 1):\n try:\n start_time = time()\n yield\n finally:\n end_time = time()\n for _ in range(num_calls):\n self.execution_times[tag].append((end_time - start_time) / num_calls)\n\n def dump(self, path: Path) -> None:\n path.parent.mkdir(exist_ok=True, parents=True)\n with path.open(\"w\") as f:\n json.dump(dict(self.execution_times), f)\n\n def dump_memory(self, path: Path) -> None:\n path.parent.mkdir(exist_ok=True, parents=True)\n with path.open(\"w\") as f:\n json.dump(torch.cuda.memory_stats()[\"allocated_bytes.all.peak\"], f)\n\n def summarize(self) -> None:\n for tag, times in self.execution_times.items():\n print(f\"{tag}: {len(times)} calls, avg. {np.mean(times)} seconds per call\")"
},
{
"identifier": "prep_image",
"path": "src/misc/image_io.py",
"snippet": "def prep_image(image: FloatImage) -> UInt8[np.ndarray, \"height width channel\"]:\n # Handle batched images.\n if image.ndim == 4:\n image = rearrange(image, \"b c h w -> c h (b w)\")\n\n # Handle single-channel images.\n if image.ndim == 2:\n image = rearrange(image, \"h w -> () h w\")\n\n # Ensure that there are 3 or 4 channels.\n channel, _, _ = image.shape\n if channel == 1:\n image = repeat(image, \"() h w -> c h w\", c=3)\n assert image.shape[0] in (3, 4)\n\n image = (image.detach().clip(min=0, max=1) * 255).type(torch.uint8)\n return rearrange(image, \"c h w -> h w c\").cpu().numpy()"
},
{
"identifier": "save_image",
"path": "src/misc/image_io.py",
"snippet": "def save_image(\n image: FloatImage,\n path: Union[Path, str],\n) -> None:\n \"\"\"Save an image. Assumed to be in range 0-1.\"\"\"\n\n # Create the parent directory if it doesn't already exist.\n path = Path(path)\n path.parent.mkdir(exist_ok=True, parents=True)\n\n # Save the image.\n Image.fromarray(prep_image(image)).save(path)"
},
{
"identifier": "LOG_PATH",
"path": "src/misc/LocalLogger.py",
"snippet": "LOG_PATH = Path(\"outputs/local\")"
},
{
"identifier": "LocalLogger",
"path": "src/misc/LocalLogger.py",
"snippet": "class LocalLogger(Logger):\n def __init__(self) -> None:\n super().__init__()\n self.experiment = None\n os.system(f\"rm -r {LOG_PATH}\")\n\n @property\n def name(self):\n return \"LocalLogger\"\n\n @property\n def version(self):\n return 0\n\n @rank_zero_only\n def log_hyperparams(self, params):\n pass\n\n @rank_zero_only\n def log_metrics(self, metrics, step):\n pass\n\n @rank_zero_only\n def log_image(\n self,\n key: str,\n images: list[Any],\n step: Optional[int] = None,\n **kwargs,\n ):\n # The function signature is the same as the wandb logger's, but the step is\n # actually required.\n assert step is not None\n for index, image in enumerate(images):\n path = LOG_PATH / f\"{key}/{index:0>2}_{step:0>6}.png\"\n path.parent.mkdir(exist_ok=True, parents=True)\n Image.fromarray(image).save(path)"
},
{
"identifier": "StepTracker",
"path": "src/misc/step_tracker.py",
"snippet": "class StepTracker:\n lock: RLock\n step: Int64[Tensor, \"\"]\n\n def __init__(self):\n self.lock = Manager().RLock()\n self.step = torch.tensor(0, dtype=torch.int64).share_memory_()\n\n def set_step(self, step: int) -> None:\n with self.lock:\n self.step.fill_(step)\n\n def get_step(self) -> int:\n with self.lock:\n return self.step.item()"
},
{
"identifier": "add_label",
"path": "src/visualization/annotation.py",
"snippet": "def add_label(\n image: Float[Tensor, \"3 width height\"],\n label: str,\n font: Path = Path(\"assets/Inter-Regular.otf\"),\n font_size: int = 24,\n) -> Float[Tensor, \"3 width_with_label height_with_label\"]:\n return vcat(\n draw_label(label, font, font_size, image.device),\n image,\n align=\"left\",\n gap=4,\n )"
},
{
"identifier": "interpolate_extrinsics",
"path": "src/visualization/camera_trajectory/interpolation.py",
"snippet": "@torch.no_grad()\ndef interpolate_extrinsics(\n initial: Float[Tensor, \"*#batch 4 4\"],\n final: Float[Tensor, \"*#batch 4 4\"],\n t: Float[Tensor, \" time_step\"],\n eps: float = 1e-4,\n) -> Float[Tensor, \"*batch time_step 4 4\"]:\n \"\"\"Interpolate extrinsics by rotating around their \"focus point,\" which is the\n least-squares intersection between the look vectors of the initial and final\n extrinsics.\n \"\"\"\n\n initial = initial.type(torch.float64)\n final = final.type(torch.float64)\n t = t.type(torch.float64)\n\n # Based on the dot product between the look vectors, pick from one of two cases:\n # 1. Look vectors are parallel: interpolate about their origins' midpoint.\n # 3. Look vectors aren't parallel: interpolate about their focus point.\n initial_look = initial[..., :3, 2]\n final_look = final[..., :3, 2]\n dot_products = einsum(initial_look, final_look, \"... i, ... i -> ...\")\n parallel_mask = (dot_products.abs() - 1).abs() < eps\n\n # Pick focus points.\n initial_origin = initial[..., :3, 3]\n final_origin = final[..., :3, 3]\n pivot_point = 0.5 * (initial_origin + final_origin)\n pivot_point[~parallel_mask] = intersect_rays(\n initial_origin[~parallel_mask],\n initial_look[~parallel_mask],\n final_origin[~parallel_mask],\n final_look[~parallel_mask],\n )\n\n # Convert to pivot parameters.\n pivot_frame = generate_rotation_coordinate_frame(initial_look, final_look, eps=eps)\n initial_params = extrinsics_to_pivot_parameters(initial, pivot_frame, pivot_point)\n final_params = extrinsics_to_pivot_parameters(final, pivot_frame, pivot_point)\n\n # Interpolate the pivot parameters.\n interpolated_params = interpolate_pivot_parameters(initial_params, final_params, t)\n\n # Convert back.\n return pivot_parameters_to_extrinsics(\n interpolated_params.type(torch.float32),\n rearrange(pivot_frame, \"... i j -> ... () i j\").type(torch.float32),\n rearrange(pivot_point, \"... xyz -> ... () xyz\").type(torch.float32),\n )"
},
{
"identifier": "interpolate_intrinsics",
"path": "src/visualization/camera_trajectory/interpolation.py",
"snippet": "def interpolate_intrinsics(\n initial: Float[Tensor, \"*#batch 3 3\"],\n final: Float[Tensor, \"*#batch 3 3\"],\n t: Float[Tensor, \" time_step\"],\n) -> Float[Tensor, \"*batch time_step 3 3\"]:\n initial = rearrange(initial, \"... i j -> ... () i j\")\n final = rearrange(final, \"... i j -> ... () i j\")\n t = rearrange(t, \"t -> t () ()\")\n return initial + (final - initial) * t"
},
{
"identifier": "generate_wobble",
"path": "src/visualization/camera_trajectory/wobble.py",
"snippet": "@torch.no_grad()\ndef generate_wobble(\n extrinsics: Float[Tensor, \"*#batch 4 4\"],\n radius: Float[Tensor, \"*#batch\"],\n t: Float[Tensor, \" time_step\"],\n) -> Float[Tensor, \"*batch time_step 4 4\"]:\n tf = generate_wobble_transformation(radius, t)\n return rearrange(extrinsics, \"... i j -> ... () i j\") @ tf"
},
{
"identifier": "generate_wobble_transformation",
"path": "src/visualization/camera_trajectory/wobble.py",
"snippet": "@torch.no_grad()\ndef generate_wobble_transformation(\n radius: Float[Tensor, \"*#batch\"],\n t: Float[Tensor, \" time_step\"],\n num_rotations: int = 1,\n scale_radius_with_t: bool = True,\n) -> Float[Tensor, \"*batch time_step 4 4\"]:\n # Generate a translation in the image plane.\n tf = torch.eye(4, dtype=torch.float32, device=t.device)\n tf = tf.broadcast_to((*radius.shape, t.shape[0], 4, 4)).clone()\n radius = radius[..., None]\n if scale_radius_with_t:\n radius = radius * t\n tf[..., 0, 3] = torch.sin(2 * torch.pi * num_rotations * t) * radius\n tf[..., 1, 3] = -torch.cos(2 * torch.pi * num_rotations * t) * radius\n return tf"
},
{
"identifier": "apply_color_map_to_image",
"path": "src/visualization/color_map.py",
"snippet": "def apply_color_map_to_image(\n image: Float[Tensor, \"*batch height width\"],\n color_map: str = \"inferno\",\n) -> Float[Tensor, \"*batch 3 height with\"]:\n image = apply_color_map(image, color_map)\n return rearrange(image, \"... h w c -> ... c h w\")"
},
{
"identifier": "add_border",
"path": "src/visualization/layout.py",
"snippet": "def add_border(\n image: Float[Tensor, \"channel height width\"],\n border: int = 8,\n color: Color = 1,\n) -> Float[Tensor, \"channel new_height new_width\"]:\n color = _sanitize_color(color).to(image)\n c, h, w = image.shape\n result = torch.empty(\n (c, h + 2 * border, w + 2 * border), dtype=torch.float32, device=image.device\n )\n result[:] = color[:, None, None]\n result[:, border : h + border, border : w + border] = image\n return result"
},
{
"identifier": "hcat",
"path": "src/visualization/layout.py",
"snippet": "def hcat(\n *images: Iterable[Float[Tensor, \"channel _ _\"]],\n align: Literal[\"start\", \"center\", \"end\", \"top\", \"bottom\"] = \"start\",\n gap: int = 8,\n gap_color: Color = 1,\n):\n \"\"\"Shorthand for a horizontal linear concatenation.\"\"\"\n return cat(\n \"horizontal\",\n *images,\n align={\n \"start\": \"start\",\n \"center\": \"center\",\n \"end\": \"end\",\n \"top\": \"start\",\n \"bottom\": \"end\",\n }[align],\n gap=gap,\n gap_color=gap_color,\n )"
},
{
"identifier": "vcat",
"path": "src/visualization/layout.py",
"snippet": "def vcat(\n *images: Iterable[Float[Tensor, \"channel _ _\"]],\n align: Literal[\"start\", \"center\", \"end\", \"left\", \"right\"] = \"start\",\n gap: int = 8,\n gap_color: Color = 1,\n):\n \"\"\"Shorthand for a horizontal linear concatenation.\"\"\"\n return cat(\n \"vertical\",\n *images,\n align={\n \"start\": \"start\",\n \"center\": \"center\",\n \"end\": \"end\",\n \"left\": \"start\",\n \"right\": \"end\",\n }[align],\n gap=gap,\n gap_color=gap_color,\n )"
},
{
"identifier": "render_cameras",
"path": "src/visualization/validation_in_3d.py",
"snippet": "def render_cameras(batch: dict, resolution: int) -> Float[Tensor, \"3 3 height width\"]:\n # Define colors for context and target views.\n num_context_views = batch[\"context\"][\"extrinsics\"].shape[1]\n num_target_views = batch[\"target\"][\"extrinsics\"].shape[1]\n color = torch.ones(\n (num_target_views + num_context_views, 3),\n dtype=torch.float32,\n device=batch[\"target\"][\"extrinsics\"].device,\n )\n color[num_context_views:, 1:] = 0\n\n return draw_cameras(\n resolution,\n torch.cat(\n (batch[\"context\"][\"extrinsics\"][0], batch[\"target\"][\"extrinsics\"][0])\n ),\n torch.cat(\n (batch[\"context\"][\"intrinsics\"][0], batch[\"target\"][\"intrinsics\"][0])\n ),\n color,\n torch.cat((batch[\"context\"][\"near\"][0], batch[\"target\"][\"near\"][0])),\n torch.cat((batch[\"context\"][\"far\"][0], batch[\"target\"][\"far\"][0])),\n )"
},
{
"identifier": "render_projections",
"path": "src/visualization/validation_in_3d.py",
"snippet": "def render_projections(\n gaussians: Gaussians,\n resolution: int,\n margin: float = 0.1,\n draw_label: bool = True,\n extra_label: str = \"\",\n) -> Float[Tensor, \"batch 3 3 height width\"]:\n device = gaussians.means.device\n b, _, _ = gaussians.means.shape\n\n # Compute the minima and maxima of the scene.\n minima = gaussians.means.min(dim=1).values\n maxima = gaussians.means.max(dim=1).values\n scene_minima, scene_maxima = compute_equal_aabb_with_margin(\n minima, maxima, margin=margin\n )\n\n projections = []\n for look_axis in range(3):\n right_axis = (look_axis + 1) % 3\n down_axis = (look_axis + 2) % 3\n\n # Define the extrinsics for rendering.\n extrinsics = torch.zeros((b, 4, 4), dtype=torch.float32, device=device)\n extrinsics[:, right_axis, 0] = 1\n extrinsics[:, down_axis, 1] = 1\n extrinsics[:, look_axis, 2] = 1\n extrinsics[:, right_axis, 3] = 0.5 * (\n scene_minima[:, right_axis] + scene_maxima[:, right_axis]\n )\n extrinsics[:, down_axis, 3] = 0.5 * (\n scene_minima[:, down_axis] + scene_maxima[:, down_axis]\n )\n extrinsics[:, look_axis, 3] = scene_minima[:, look_axis]\n extrinsics[:, 3, 3] = 1\n\n # Define the intrinsics for rendering.\n extents = scene_maxima - scene_minima\n far = extents[:, look_axis]\n near = torch.zeros_like(far)\n width = extents[:, right_axis]\n height = extents[:, down_axis]\n\n projection = render_cuda_orthographic(\n extrinsics,\n width,\n height,\n near,\n far,\n (resolution, resolution),\n torch.zeros((b, 3), dtype=torch.float32, device=device),\n gaussians.means,\n gaussians.covariances,\n gaussians.harmonics,\n gaussians.opacities,\n fov_degrees=10.0,\n )\n if draw_label:\n right_axis_name = \"XYZ\"[right_axis]\n down_axis_name = \"XYZ\"[down_axis]\n label = f\"{right_axis_name}{down_axis_name} Projection {extra_label}\"\n projection = torch.stack([add_label(x, label) for x in projection])\n\n projections.append(projection)\n\n return torch.stack(pad(projections), dim=1)"
},
{
"identifier": "Decoder",
"path": "src/model/decoder/decoder.py",
"snippet": "class DecoderOutput:\nclass Decoder(nn.Module, ABC, Generic[T]):\nT = TypeVar(\"T\")\n def __init__(self, cfg: T, dataset_cfg: DatasetCfg) -> None:\n def forward(\n self,\n gaussians: Gaussians,\n extrinsics: Float[Tensor, \"batch view 4 4\"],\n intrinsics: Float[Tensor, \"batch view 3 3\"],\n near: Float[Tensor, \"batch view\"],\n far: Float[Tensor, \"batch view\"],\n image_shape: tuple[int, int],\n depth_mode: DepthRenderingMode | None = None,\n ) -> DecoderOutput:"
},
{
"identifier": "Encoder",
"path": "src/model/encoder/encoder.py",
"snippet": "class Encoder(nn.Module, ABC, Generic[T]):\n cfg: T\n\n def __init__(self, cfg: T) -> None:\n super().__init__()\n self.cfg = cfg\n\n @abstractmethod\n def forward(\n self,\n context: BatchedViews,\n deterministic: bool,\n ) -> Gaussians:\n pass\n\n def get_data_shim(self) -> DataShim:\n \"\"\"The default shim doesn't modify the batch.\"\"\"\n return lambda x: x"
},
{
"identifier": "EncoderVisualizer",
"path": "src/model/encoder/visualization/encoder_visualizer.py",
"snippet": "class EncoderVisualizer(ABC, Generic[T_cfg, T_encoder]):\n cfg: T_cfg\n encoder: T_encoder\n\n def __init__(self, cfg: T_cfg, encoder: T_encoder) -> None:\n self.cfg = cfg\n self.encoder = encoder\n\n @abstractmethod\n def visualize(\n self,\n context: dict,\n global_step: int,\n ) -> dict[str, Float[Tensor, \"3 _ _\"]]:\n pass"
}
] | from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Protocol, runtime_checkable
from einops import pack, rearrange, repeat
from jaxtyping import Float
from pytorch_lightning import LightningModule
from pytorch_lightning.loggers.wandb import WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from torch import Tensor, nn, optim
from ..dataset.data_module import get_data_shim
from ..dataset.types import BatchedExample
from ..evaluation.metrics import compute_lpips, compute_psnr, compute_ssim
from ..global_cfg import get_cfg
from ..loss import Loss
from ..misc.benchmarker import Benchmarker
from ..misc.image_io import prep_image, save_image
from ..misc.LocalLogger import LOG_PATH, LocalLogger
from ..misc.step_tracker import StepTracker
from ..visualization.annotation import add_label
from ..visualization.camera_trajectory.interpolation import (
interpolate_extrinsics,
interpolate_intrinsics,
)
from ..visualization.camera_trajectory.wobble import (
generate_wobble,
generate_wobble_transformation,
)
from ..visualization.color_map import apply_color_map_to_image
from ..visualization.layout import add_border, hcat, vcat
from ..visualization.validation_in_3d import render_cameras, render_projections
from .decoder.decoder import Decoder, DepthRenderingMode
from .encoder import Encoder
from .encoder.visualization.encoder_visualizer import EncoderVisualizer
import moviepy.editor as mpy
import torch
import wandb | 7,684 | self.render_video_interpolation_exaggerated(batch)
@rank_zero_only
def render_video_wobble(self, batch: BatchedExample) -> None:
# Two views are needed to get the wobble radius.
_, v, _, _ = batch["context"]["extrinsics"].shape
if v != 2:
return
def trajectory_fn(t):
origin_a = batch["context"]["extrinsics"][:, 0, :3, 3]
origin_b = batch["context"]["extrinsics"][:, 1, :3, 3]
delta = (origin_a - origin_b).norm(dim=-1)
extrinsics = generate_wobble(
batch["context"]["extrinsics"][:, 0],
delta * 0.25,
t,
)
intrinsics = repeat(
batch["context"]["intrinsics"][:, 0],
"b i j -> b v i j",
v=t.shape[0],
)
return extrinsics, intrinsics
return self.render_video_generic(batch, trajectory_fn, "wobble", num_frames=60)
@rank_zero_only
def render_video_interpolation(self, batch: BatchedExample) -> None:
_, v, _, _ = batch["context"]["extrinsics"].shape
def trajectory_fn(t):
extrinsics = interpolate_extrinsics(
batch["context"]["extrinsics"][0, 0],
batch["context"]["extrinsics"][0, 1]
if v == 2
else batch["target"]["extrinsics"][0, 0],
t,
)
intrinsics = interpolate_intrinsics(
batch["context"]["intrinsics"][0, 0],
batch["context"]["intrinsics"][0, 1]
if v == 2
else batch["target"]["intrinsics"][0, 0],
t,
)
return extrinsics[None], intrinsics[None]
return self.render_video_generic(batch, trajectory_fn, "rgb")
@rank_zero_only
def render_video_interpolation_exaggerated(self, batch: BatchedExample) -> None:
# Two views are needed to get the wobble radius.
_, v, _, _ = batch["context"]["extrinsics"].shape
if v != 2:
return
def trajectory_fn(t):
origin_a = batch["context"]["extrinsics"][:, 0, :3, 3]
origin_b = batch["context"]["extrinsics"][:, 1, :3, 3]
delta = (origin_a - origin_b).norm(dim=-1)
tf = generate_wobble_transformation(
delta * 0.5,
t,
5,
scale_radius_with_t=False,
)
extrinsics = interpolate_extrinsics(
batch["context"]["extrinsics"][0, 0],
batch["context"]["extrinsics"][0, 1]
if v == 2
else batch["target"]["extrinsics"][0, 0],
t * 5 - 2,
)
intrinsics = interpolate_intrinsics(
batch["context"]["intrinsics"][0, 0],
batch["context"]["intrinsics"][0, 1]
if v == 2
else batch["target"]["intrinsics"][0, 0],
t * 5 - 2,
)
return extrinsics @ tf, intrinsics[None]
return self.render_video_generic(
batch,
trajectory_fn,
"interpolation_exagerrated",
num_frames=300,
smooth=False,
loop_reverse=False,
)
@rank_zero_only
def render_video_generic(
self,
batch: BatchedExample,
trajectory_fn: TrajectoryFn,
name: str,
num_frames: int = 30,
smooth: bool = True,
loop_reverse: bool = True,
) -> None:
# Render probabilistic estimate of scene.
gaussians_prob = self.encoder(batch["context"], self.global_step, False)
gaussians_det = self.encoder(batch["context"], self.global_step, True)
t = torch.linspace(0, 1, num_frames, dtype=torch.float32, device=self.device)
if smooth:
t = (torch.cos(torch.pi * (t + 1)) + 1) / 2
extrinsics, intrinsics = trajectory_fn(t)
_, _, _, h, w = batch["context"]["image"].shape
# Color-map the result.
def depth_map(result):
near = result[result > 0][:16_000_000].quantile(0.01).log()
far = result.view(-1)[:16_000_000].quantile(0.99).log()
result = result.log()
result = 1 - (result - near) / (far - near)
|
@dataclass
class OptimizerCfg:
lr: float
warm_up_steps: int
@dataclass
class TestCfg:
output_path: Path
@dataclass
class TrainCfg:
depth_mode: DepthRenderingMode | None
extended_visualization: bool
@runtime_checkable
class TrajectoryFn(Protocol):
def __call__(
self,
t: Float[Tensor, " t"],
) -> tuple[
Float[Tensor, "batch view 4 4"], # extrinsics
Float[Tensor, "batch view 3 3"], # intrinsics
]:
pass
class ModelWrapper(LightningModule):
logger: Optional[WandbLogger]
encoder: nn.Module
encoder_visualizer: Optional[EncoderVisualizer]
decoder: Decoder
losses: nn.ModuleList
optimizer_cfg: OptimizerCfg
test_cfg: TestCfg
train_cfg: TrainCfg
step_tracker: StepTracker | None
def __init__(
self,
optimizer_cfg: OptimizerCfg,
test_cfg: TestCfg,
train_cfg: TrainCfg,
encoder: Encoder,
encoder_visualizer: Optional[EncoderVisualizer],
decoder: Decoder,
losses: list[Loss],
step_tracker: StepTracker | None,
) -> None:
super().__init__()
self.optimizer_cfg = optimizer_cfg
self.test_cfg = test_cfg
self.train_cfg = train_cfg
self.step_tracker = step_tracker
# Set up the model.
self.encoder = encoder
self.encoder_visualizer = encoder_visualizer
self.decoder = decoder
self.data_shim = get_data_shim(self.encoder)
self.losses = nn.ModuleList(losses)
# This is used for testing.
self.benchmarker = Benchmarker()
def training_step(self, batch, batch_idx):
batch: BatchedExample = self.data_shim(batch)
_, _, _, h, w = batch["target"]["image"].shape
# Run the model.
gaussians = self.encoder(batch["context"], self.global_step, False)
output = self.decoder.forward(
gaussians,
batch["target"]["extrinsics"],
batch["target"]["intrinsics"],
batch["target"]["near"],
batch["target"]["far"],
(h, w),
depth_mode=self.train_cfg.depth_mode,
)
target_gt = batch["target"]["image"]
# Compute metrics.
psnr_probabilistic = compute_psnr(
rearrange(target_gt, "b v c h w -> (b v) c h w"),
rearrange(output.color, "b v c h w -> (b v) c h w"),
)
self.log("train/psnr_probabilistic", psnr_probabilistic.mean())
# Compute and log loss.
total_loss = 0
for loss_fn in self.losses:
loss = loss_fn.forward(output, batch, gaussians, self.global_step)
self.log(f"loss/{loss_fn.name}", loss)
total_loss = total_loss + loss
self.log("loss/total", total_loss)
if self.global_rank == 0:
print(
f"train step {self.global_step}; "
f"scene = {batch['scene']}; "
f"context = {batch['context']['index'].tolist()}; "
f"loss = {total_loss:.6f}"
)
# Tell the data loader processes about the current step.
if self.step_tracker is not None:
self.step_tracker.set_step(self.global_step)
return total_loss
def test_step(self, batch, batch_idx):
batch: BatchedExample = self.data_shim(batch)
b, v, _, h, w = batch["target"]["image"].shape
assert b == 1
if batch_idx % 100 == 0:
print(f"Test step {batch_idx:0>6}.")
# Render Gaussians.
with self.benchmarker.time("encoder"):
gaussians = self.encoder(
batch["context"],
self.global_step,
deterministic=False,
)
with self.benchmarker.time("decoder", num_calls=v):
output = self.decoder.forward(
gaussians,
batch["target"]["extrinsics"],
batch["target"]["intrinsics"],
batch["target"]["near"],
batch["target"]["far"],
(h, w),
)
# Save images.
(scene,) = batch["scene"]
name = get_cfg()["wandb"]["name"]
path = self.test_cfg.output_path / name
for index, color in zip(batch["target"]["index"][0], output.color[0]):
save_image(color, path / scene / f"color/{index:0>6}.png")
def on_test_end(self) -> None:
name = get_cfg()["wandb"]["name"]
self.benchmarker.dump(self.test_cfg.output_path / name / "benchmark.json")
self.benchmarker.dump_memory(
self.test_cfg.output_path / name / "peak_memory.json"
)
@rank_zero_only
def validation_step(self, batch, batch_idx):
batch: BatchedExample = self.data_shim(batch)
if self.global_rank == 0:
print(
f"validation step {self.global_step}; "
f"scene = {batch['scene']}; "
f"context = {batch['context']['index'].tolist()}"
)
# Render Gaussians.
b, _, _, h, w = batch["target"]["image"].shape
assert b == 1
gaussians_probabilistic = self.encoder(
batch["context"],
self.global_step,
deterministic=False,
)
output_probabilistic = self.decoder.forward(
gaussians_probabilistic,
batch["target"]["extrinsics"],
batch["target"]["intrinsics"],
batch["target"]["near"],
batch["target"]["far"],
(h, w),
)
rgb_probabilistic = output_probabilistic.color[0]
gaussians_deterministic = self.encoder(
batch["context"],
self.global_step,
deterministic=True,
)
output_deterministic = self.decoder.forward(
gaussians_deterministic,
batch["target"]["extrinsics"],
batch["target"]["intrinsics"],
batch["target"]["near"],
batch["target"]["far"],
(h, w),
)
rgb_deterministic = output_deterministic.color[0]
# Compute validation metrics.
rgb_gt = batch["target"]["image"][0]
for tag, rgb in zip(
("deterministic", "probabilistic"), (rgb_deterministic, rgb_probabilistic)
):
psnr = compute_psnr(rgb_gt, rgb).mean()
self.log(f"val/psnr_{tag}", psnr)
lpips = compute_lpips(rgb_gt, rgb).mean()
self.log(f"val/lpips_{tag}", lpips)
ssim = compute_ssim(rgb_gt, rgb).mean()
self.log(f"val/ssim_{tag}", ssim)
# Construct comparison image.
comparison = hcat(
add_label(vcat(*batch["context"]["image"][0]), "Context"),
add_label(vcat(*rgb_gt), "Target (Ground Truth)"),
add_label(vcat(*rgb_probabilistic), "Target (Probabilistic)"),
add_label(vcat(*rgb_deterministic), "Target (Deterministic)"),
)
self.logger.log_image(
"comparison",
[prep_image(add_border(comparison))],
step=self.global_step,
caption=batch["scene"],
)
# Render projections and construct projection image.
        # (Note: RE10k scenes are effectively unbounded, which limits how informative these projections are.)
projections = vcat(
hcat(
*render_projections(
gaussians_probabilistic,
256,
extra_label="(Probabilistic)",
)[0]
),
hcat(
*render_projections(
gaussians_deterministic, 256, extra_label="(Deterministic)"
)[0]
),
align="left",
)
self.logger.log_image(
"projection",
[prep_image(add_border(projections))],
step=self.global_step,
)
# Draw cameras.
cameras = hcat(*render_cameras(batch, 256))
self.logger.log_image(
"cameras", [prep_image(add_border(cameras))], step=self.global_step
)
if self.encoder_visualizer is not None:
for k, image in self.encoder_visualizer.visualize(
batch["context"], self.global_step
).items():
self.logger.log_image(k, [prep_image(image)], step=self.global_step)
# Run video validation step.
self.render_video_interpolation(batch)
self.render_video_wobble(batch)
if self.train_cfg.extended_visualization:
self.render_video_interpolation_exaggerated(batch)
@rank_zero_only
def render_video_wobble(self, batch: BatchedExample) -> None:
# Two views are needed to get the wobble radius.
_, v, _, _ = batch["context"]["extrinsics"].shape
if v != 2:
return
def trajectory_fn(t):
origin_a = batch["context"]["extrinsics"][:, 0, :3, 3]
origin_b = batch["context"]["extrinsics"][:, 1, :3, 3]
delta = (origin_a - origin_b).norm(dim=-1)
extrinsics = generate_wobble(
batch["context"]["extrinsics"][:, 0],
delta * 0.25,
t,
)
intrinsics = repeat(
batch["context"]["intrinsics"][:, 0],
"b i j -> b v i j",
v=t.shape[0],
)
return extrinsics, intrinsics
return self.render_video_generic(batch, trajectory_fn, "wobble", num_frames=60)
@rank_zero_only
def render_video_interpolation(self, batch: BatchedExample) -> None:
_, v, _, _ = batch["context"]["extrinsics"].shape
def trajectory_fn(t):
extrinsics = interpolate_extrinsics(
batch["context"]["extrinsics"][0, 0],
batch["context"]["extrinsics"][0, 1]
if v == 2
else batch["target"]["extrinsics"][0, 0],
t,
)
intrinsics = interpolate_intrinsics(
batch["context"]["intrinsics"][0, 0],
batch["context"]["intrinsics"][0, 1]
if v == 2
else batch["target"]["intrinsics"][0, 0],
t,
)
return extrinsics[None], intrinsics[None]
return self.render_video_generic(batch, trajectory_fn, "rgb")
@rank_zero_only
def render_video_interpolation_exaggerated(self, batch: BatchedExample) -> None:
# Two views are needed to get the wobble radius.
_, v, _, _ = batch["context"]["extrinsics"].shape
if v != 2:
return
def trajectory_fn(t):
origin_a = batch["context"]["extrinsics"][:, 0, :3, 3]
origin_b = batch["context"]["extrinsics"][:, 1, :3, 3]
delta = (origin_a - origin_b).norm(dim=-1)
tf = generate_wobble_transformation(
delta * 0.5,
t,
5,
scale_radius_with_t=False,
)
extrinsics = interpolate_extrinsics(
batch["context"]["extrinsics"][0, 0],
batch["context"]["extrinsics"][0, 1]
if v == 2
else batch["target"]["extrinsics"][0, 0],
t * 5 - 2,
)
intrinsics = interpolate_intrinsics(
batch["context"]["intrinsics"][0, 0],
batch["context"]["intrinsics"][0, 1]
if v == 2
else batch["target"]["intrinsics"][0, 0],
t * 5 - 2,
)
return extrinsics @ tf, intrinsics[None]
return self.render_video_generic(
batch,
trajectory_fn,
"interpolation_exagerrated",
num_frames=300,
smooth=False,
loop_reverse=False,
)
@rank_zero_only
def render_video_generic(
self,
batch: BatchedExample,
trajectory_fn: TrajectoryFn,
name: str,
num_frames: int = 30,
smooth: bool = True,
loop_reverse: bool = True,
) -> None:
# Render probabilistic estimate of scene.
gaussians_prob = self.encoder(batch["context"], self.global_step, False)
gaussians_det = self.encoder(batch["context"], self.global_step, True)
t = torch.linspace(0, 1, num_frames, dtype=torch.float32, device=self.device)
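        # Editor's note (assumption about intent, not stated in the original code): the cosine
        # remap below is a standard ease-in-out, sending uniform samples t in [0, 1] through
        # (cos(pi*(t+1)) + 1) / 2 so the rendered camera path slows near its start and end.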
if smooth:
t = (torch.cos(torch.pi * (t + 1)) + 1) / 2
extrinsics, intrinsics = trajectory_fn(t)
_, _, _, h, w = batch["context"]["image"].shape
# Color-map the result.
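        # Editor's note: the helper below works in log-depth space. `near` and `far` are
        # estimated from the 1st/99th percentiles of a (subsampled) set of depth values, and
        # the normalized value is inverted so nearby surfaces land at the high end of the
        # "turbo" colormap.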
def depth_map(result):
near = result[result > 0][:16_000_000].quantile(0.01).log()
far = result.view(-1)[:16_000_000].quantile(0.99).log()
result = result.log()
result = 1 - (result - near) / (far - near) | return apply_color_map_to_image(result, "turbo") | 18 | 2023-12-20 19:45:59+00:00 | 12k |
hutaiHang/Faster-Diffusion | if_demo.py | [
{
"identifier": "register_if1",
"path": "utils_if.py",
"snippet": "def register_if1(pipe):\r\n def new_call(self):\r\n @torch.no_grad()\r\n def call(\r\n prompt: Union[str, List[str]] = None,\r\n num_inference_steps: int = 100,\r\n timesteps: List[int] = None,\r\n guidance_scale: float = 7.0,\r\n negative_prompt: Optional[Union[str, List[str]]] = None,\r\n num_images_per_prompt: Optional[int] = 1,\r\n height: Optional[int] = None,\r\n width: Optional[int] = None,\r\n eta: float = 0.0,\r\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\r\n prompt_embeds: Optional[torch.FloatTensor] = None,\r\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\r\n output_type: Optional[str] = \"pil\",\r\n return_dict: bool = True,\r\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\r\n callback_steps: int = 1,\r\n clean_caption: bool = True,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n ):\r\n # 1. Check inputs. Raise error if not correct\r\n self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)\r\n\r\n # 2. Define call parameters\r\n height = height or self.unet.config.sample_size\r\n width = width or self.unet.config.sample_size\r\n\r\n if prompt is not None and isinstance(prompt, str):\r\n batch_size = 1\r\n elif prompt is not None and isinstance(prompt, list):\r\n batch_size = len(prompt)\r\n else:\r\n batch_size = prompt_embeds.shape[0]\r\n\r\n device = self._execution_device\r\n\r\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\r\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\r\n # corresponds to doing no classifier free guidance.\r\n do_classifier_free_guidance = guidance_scale > 1.0\r\n\r\n # 3. Encode input prompt\r\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\r\n prompt,\r\n do_classifier_free_guidance,\r\n num_images_per_prompt=num_images_per_prompt,\r\n device=device,\r\n negative_prompt=negative_prompt,\r\n prompt_embeds=prompt_embeds,\r\n negative_prompt_embeds=negative_prompt_embeds,\r\n clean_caption=clean_caption,\r\n )\r\n\r\n if do_classifier_free_guidance:\r\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\r\n\r\n # 4. Prepare timesteps\r\n if timesteps is not None:\r\n self.scheduler.set_timesteps(timesteps=timesteps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n num_inference_steps = len(timesteps)\r\n else:\r\n self.scheduler.set_timesteps(num_inference_steps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n\r\n # 5. Prepare intermediate images\r\n intermediate_images = self.prepare_intermediate_images(\r\n batch_size * num_images_per_prompt,\r\n self.unet.config.in_channels,\r\n height,\r\n width,\r\n prompt_embeds.dtype,\r\n device,\r\n generator,\r\n )\r\n\r\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\r\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\r\n\r\n # HACK: see comment in `enable_model_cpu_offload`\r\n if hasattr(self, \"text_encoder_offload_hook\") and self.text_encoder_offload_hook is not None:\r\n self.text_encoder_offload_hook.offload()\r\n\r\n # 7. 
Denoising loop\r\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\r\n\r\n all_timesteps = len(timesteps)\r\n curr_step = 0\r\n st = time.time()\r\n while curr_step<all_timesteps:\r\n refister_time(self.unet, curr_step)\r\n\r\n time_ls = []\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step > 85 or curr_step < 10 or (curr_step % 5 == 0)\r\n \r\n while (not cond) and (curr_step<all_timesteps):\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step > 85 or curr_step < 10 or (curr_step % 5 == 0)\r\n\r\n # print('curr_step', curr_step, len(time_ls))\r\n model_input = (\r\n torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images\r\n )\r\n\r\n # predict the noise residual\r\n noise_pred = self.unet(\r\n model_input,\r\n time_ls,\r\n encoder_hidden_states=prompt_embeds,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n return_dict=False,\r\n )[0]\r\n\r\n # perform guidance\r\n if do_classifier_free_guidance:\r\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\r\n noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1)\r\n noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1)\r\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\r\n noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)\r\n\r\n if self.scheduler.config.variance_type not in [\"learned\", \"learned_range\"]:\r\n noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1)\r\n\r\n # compute the previous noisy sample x_t -> x_t-1\r\n # intermediate_images = self.scheduler.step(\r\n # noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False\r\n # )[0]\r\n intermediate_images = multistep_pre(\r\n self, noise_pred, time_ls, intermediate_images)\r\n et = time.time()\r\n print('unet time: ', et-st, 'seconds')\r\n image = intermediate_images\r\n\r\n if output_type == \"pil\":\r\n # 8. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 9. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # 10. Convert to PIL\r\n image = self.numpy_to_pil(image)\r\n\r\n # 11. Apply watermark\r\n if self.watermarker is not None:\r\n image = self.watermarker.apply_watermark(image, self.unet.config.sample_size)\r\n elif output_type == \"pt\":\r\n nsfw_detected = None\r\n watermark_detected = None\r\n\r\n if hasattr(self, \"unet_offload_hook\") and self.unet_offload_hook is not None:\r\n self.unet_offload_hook.offload()\r\n else:\r\n # 8. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 9. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # Offload all models\r\n self.maybe_free_model_hooks()\r\n\r\n if not return_dict:\r\n return (image, nsfw_detected, watermark_detected)\r\n\r\n return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)\r\n return call\r\n pipe.call = new_call(pipe)\r"
},
{
"identifier": "register_if2",
"path": "utils_if.py",
"snippet": "def register_if2(pipe):\r\n def new_call(self):\r\n @torch.no_grad()\r\n def call(\r\n prompt: Union[str, List[str]] = None,\r\n height: int = None,\r\n width: int = None,\r\n image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor] = None,\r\n num_inference_steps: int = 50,\r\n timesteps: List[int] = None,\r\n guidance_scale: float = 4.0,\r\n negative_prompt: Optional[Union[str, List[str]]] = None,\r\n num_images_per_prompt: Optional[int] = 1,\r\n eta: float = 0.0,\r\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\r\n prompt_embeds: Optional[torch.FloatTensor] = None,\r\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\r\n output_type: Optional[str] = \"pil\",\r\n return_dict: bool = True,\r\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\r\n callback_steps: int = 1,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n noise_level: int = 250,\r\n clean_caption: bool = True,\r\n ):\r\n # 1. Check inputs. Raise error if not correct\r\n\r\n if prompt is not None and isinstance(prompt, str):\r\n batch_size = 1\r\n elif prompt is not None and isinstance(prompt, list):\r\n batch_size = len(prompt)\r\n else:\r\n batch_size = prompt_embeds.shape[0]\r\n\r\n self.check_inputs(\r\n prompt,\r\n image,\r\n batch_size,\r\n noise_level,\r\n callback_steps,\r\n negative_prompt,\r\n prompt_embeds,\r\n negative_prompt_embeds,\r\n )\r\n\r\n # 2. Define call parameters\r\n\r\n height = height or self.unet.config.sample_size\r\n width = width or self.unet.config.sample_size\r\n\r\n device = self._execution_device\r\n\r\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\r\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\r\n # corresponds to doing no classifier free guidance.\r\n do_classifier_free_guidance = guidance_scale > 1.0\r\n\r\n # 3. Encode input prompt\r\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\r\n prompt,\r\n do_classifier_free_guidance,\r\n num_images_per_prompt=num_images_per_prompt,\r\n device=device,\r\n negative_prompt=negative_prompt,\r\n prompt_embeds=prompt_embeds,\r\n negative_prompt_embeds=negative_prompt_embeds,\r\n clean_caption=clean_caption,\r\n )\r\n\r\n if do_classifier_free_guidance:\r\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\r\n\r\n # 4. Prepare timesteps\r\n if timesteps is not None:\r\n self.scheduler.set_timesteps(timesteps=timesteps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n num_inference_steps = len(timesteps)\r\n else:\r\n self.scheduler.set_timesteps(num_inference_steps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n\r\n # 5. Prepare intermediate images\r\n num_channels = self.unet.config.in_channels // 2\r\n intermediate_images = self.prepare_intermediate_images(\r\n batch_size * num_images_per_prompt,\r\n num_channels,\r\n height,\r\n width,\r\n prompt_embeds.dtype,\r\n device,\r\n generator,\r\n )\r\n\r\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\r\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\r\n\r\n # 7. 
Prepare upscaled image and noise level\r\n image = self.preprocess_image(image, num_images_per_prompt, device)\r\n upscaled = F.interpolate(image, (height, width), mode=\"bilinear\", align_corners=True)\r\n\r\n noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)\r\n noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)\r\n upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)\r\n\r\n if do_classifier_free_guidance:\r\n noise_level = torch.cat([noise_level] * 2)\r\n\r\n # HACK: see comment in `enable_model_cpu_offload`\r\n if hasattr(self, \"text_encoder_offload_hook\") and self.text_encoder_offload_hook is not None:\r\n self.text_encoder_offload_hook.offload()\r\n\r\n # 8. Denoising loop\r\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\r\n\r\n all_timesteps = len(timesteps)\r\n curr_step = 0\r\n st = time.time()\r\n while curr_step<all_timesteps:\r\n refister_time(self.unet, curr_step)\r\n\r\n time_ls = []\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step < 20 or curr_step > 40 or (curr_step % 2 == 0)\r\n \r\n while (not cond) and (curr_step<all_timesteps):\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step < 20 or curr_step > 40 or (curr_step % 2 == 0)\r\n\r\n # print('curr_step', curr_step, len(time_ls))\r\n model_input = torch.cat([intermediate_images, upscaled], dim=1)\r\n\r\n model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input\r\n\r\n # predict the noise residual\r\n noise_pred = self.unet(\r\n model_input,\r\n time_ls,\r\n encoder_hidden_states=prompt_embeds,\r\n class_labels=noise_level,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n return_dict=False,\r\n )[0]\r\n\r\n # perform guidance\r\n if do_classifier_free_guidance:\r\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\r\n noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)\r\n noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)\r\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\r\n noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)\r\n\r\n if self.scheduler.config.variance_type not in [\"learned\", \"learned_range\"]:\r\n noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)\r\n\r\n # # compute the previous noisy sample x_t -> x_t-1\r\n # intermediate_images = self.scheduler.step(\r\n # noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False\r\n # )[0]\r\n\r\n # compute the previous noisy sample x_t -> x_t-1\r\n # intermediate_images = self.scheduler.step(\r\n # noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False\r\n # )[0]\r\n intermediate_images = multistep_pre(\r\n self, noise_pred, time_ls, intermediate_images)\r\n \r\n et = time.time()\r\n print('unet time:', et - st, 'seconds')\r\n image = intermediate_images\r\n\r\n if output_type == \"pil\":\r\n # 9. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 10. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # 11. Convert to PIL\r\n image = self.numpy_to_pil(image)\r\n\r\n # 12. 
Apply watermark\r\n if self.watermarker is not None:\r\n self.watermarker.apply_watermark(image, self.unet.config.sample_size)\r\n elif output_type == \"pt\":\r\n nsfw_detected = None\r\n watermark_detected = None\r\n\r\n if hasattr(self, \"unet_offload_hook\") and self.unet_offload_hook is not None:\r\n self.unet_offload_hook.offload()\r\n else:\r\n # 9. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 10. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # Offload all models\r\n self.maybe_free_model_hooks()\r\n\r\n if not return_dict:\r\n return (image, nsfw_detected, watermark_detected)\r\n\r\n return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)\r\n\r\n return call\r\n pipe.call = new_call(pipe)\r"
},
{
"identifier": "register_if3",
"path": "utils_if.py",
"snippet": "def register_if3(pipe):\r\n def new_call(self):\r\n @torch.no_grad()\r\n def call(\r\n prompt: Union[str, List[str]] = None,\r\n image = None,\r\n num_inference_steps: int = 75,\r\n guidance_scale: float = 9.0,\r\n noise_level: int = 20,\r\n negative_prompt: Optional[Union[str, List[str]]] = None,\r\n num_images_per_prompt: Optional[int] = 1,\r\n eta: float = 0.0,\r\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\r\n latents: Optional[torch.FloatTensor] = None,\r\n prompt_embeds: Optional[torch.FloatTensor] = None,\r\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\r\n output_type: Optional[str] = \"pil\",\r\n return_dict: bool = True,\r\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\r\n callback_steps: int = 1,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n clip_skip: int = None,\r\n ):\r\n # 1. Check inputs\r\n self.check_inputs(\r\n prompt,\r\n image,\r\n noise_level,\r\n callback_steps,\r\n negative_prompt,\r\n prompt_embeds,\r\n negative_prompt_embeds,\r\n )\r\n\r\n if image is None:\r\n raise ValueError(\"`image` input cannot be undefined.\")\r\n\r\n # 2. Define call parameters\r\n if prompt is not None and isinstance(prompt, str):\r\n batch_size = 1\r\n elif prompt is not None and isinstance(prompt, list):\r\n batch_size = len(prompt)\r\n else:\r\n batch_size = prompt_embeds.shape[0]\r\n\r\n device = self._execution_device\r\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\r\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\r\n # corresponds to doing no classifier free guidance.\r\n do_classifier_free_guidance = guidance_scale > 1.0\r\n\r\n # 3. Encode input prompt\r\n text_encoder_lora_scale = (\r\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\r\n )\r\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\r\n prompt,\r\n device,\r\n num_images_per_prompt,\r\n do_classifier_free_guidance,\r\n negative_prompt,\r\n prompt_embeds=prompt_embeds,\r\n negative_prompt_embeds=negative_prompt_embeds,\r\n lora_scale=text_encoder_lora_scale,\r\n clip_skip=clip_skip,\r\n )\r\n # For classifier free guidance, we need to do two forward passes.\r\n # Here we concatenate the unconditional and text embeddings into a single batch\r\n # to avoid doing two forward passes\r\n if do_classifier_free_guidance:\r\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\r\n\r\n # 4. Preprocess image\r\n image = self.image_processor.preprocess(image)\r\n image = image.to(dtype=prompt_embeds.dtype, device=device)\r\n\r\n # 5. set timesteps\r\n self.scheduler.set_timesteps(num_inference_steps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n\r\n # 5. Add noise to image\r\n noise_level = torch.tensor([noise_level], dtype=torch.long, device=device)\r\n noise = randn_tensor(image.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)\r\n image = self.low_res_scheduler.add_noise(image, noise, noise_level)\r\n\r\n batch_multiplier = 2 if do_classifier_free_guidance else 1\r\n image = torch.cat([image] * batch_multiplier * num_images_per_prompt)\r\n noise_level = torch.cat([noise_level] * image.shape[0])\r\n\r\n # 6. 
Prepare latent variables\r\n height, width = image.shape[2:]\r\n num_channels_latents = self.vae.config.latent_channels\r\n latents = self.prepare_latents(\r\n batch_size * num_images_per_prompt,\r\n num_channels_latents,\r\n height,\r\n width,\r\n prompt_embeds.dtype,\r\n device,\r\n generator,\r\n latents,\r\n )\r\n\r\n # 7. Check that sizes of image and latents match\r\n num_channels_image = image.shape[1]\r\n if num_channels_latents + num_channels_image != self.unet.config.in_channels:\r\n raise ValueError(\r\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\r\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\r\n f\" `num_channels_image`: {num_channels_image} \"\r\n f\" = {num_channels_latents+num_channels_image}. Please verify the config of\"\r\n \" `pipeline.unet` or your `image` input.\"\r\n )\r\n\r\n # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\r\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\r\n\r\n # 9. Denoising loop\r\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\r\n\r\n\r\n all_timesteps = len(timesteps)\r\n curr_step = 0\r\n st = time.time()\r\n while curr_step<all_timesteps:\r\n refister_time(self.unet, curr_step)\r\n\r\n time_ls = []\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n\r\n ipow = int(np.sqrt(9 + 8*curr_step))\r\n cond = ipow * ipow == (9 + 8 * curr_step)\r\n # cond = curr_step in [0, 1, 2, 3, 5, 10, 15, 25, 35,45,55,65]\r\n while (not cond) and (curr_step<all_timesteps):\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n\r\n ipow = int(np.sqrt(9 + 8*curr_step))\r\n cond = ipow * ipow == (9 + 8 * curr_step)\r\n # cond = curr_step in [0, 1, 2, 3, 5, 10, 15, 25, 35,45,55,65]\r\n\r\n # print(curr_step, len(time_ls))\r\n # expand the latents if we are doing classifier free guidance\r\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\r\n\r\n # concat latents, mask, masked_image_latents in the channel dimension\r\n latent_model_input = torch.cat([latent_model_input, image], dim=1)\r\n\r\n input = (latent_model_input,time_ls[0],\r\n prompt_embeds,noise_level, None, None,\r\n cross_attention_kwargs,None, None, None,None,None,\r\n False)\r\n\r\n # predict the noise residual\r\n noise_pred = self.unet(\r\n latent_model_input,\r\n time_ls,\r\n encoder_hidden_states=prompt_embeds,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n class_labels=noise_level,\r\n return_dict=False,\r\n )[0]\r\n\r\n # perform guidance\r\n if do_classifier_free_guidance:\r\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\r\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\r\n\r\n # compute the previous noisy sample x_t -> x_t-1\r\n latents = multistep_pre(self, noise_pred, time_ls, latents)\r\n \r\n et = time.time()\r\n print('unet time:', et - st, 'seconds')\r\n\r\n if not output_type == \"latent\":\r\n # make sure the VAE is in float32 mode, as it overflows in float16\r\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\r\n\r\n if needs_upcasting:\r\n self.upcast_vae()\r\n\r\n # Ensure latents are always the same type as the VAE\r\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\r\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\r\n\r\n # cast back to fp16 if 
needed\r\n if needs_upcasting:\r\n self.vae.to(dtype=torch.float16)\r\n\r\n image, has_nsfw_concept, _ = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n else:\r\n image = latents\r\n has_nsfw_concept = None\r\n\r\n if has_nsfw_concept is None:\r\n do_denormalize = [True] * image.shape[0]\r\n else:\r\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\r\n\r\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\r\n\r\n # 11. Apply watermark\r\n if output_type == \"pil\" and self.watermarker is not None:\r\n image = self.watermarker.apply_watermark(image)\r\n\r\n # Offload all models\r\n self.maybe_free_model_hooks()\r\n\r\n if not return_dict:\r\n return (image, has_nsfw_concept)\r\n\r\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)\r\n return call\r\n pipe.call = new_call(pipe)"
},
{
"identifier": "register_faster_forward",
"path": "utils_if.py",
"snippet": "def register_faster_forward(model, mod):\r\n def faster_forward(self):\r\n def forward(\r\n sample: torch.FloatTensor,\r\n timestep: Union[torch.Tensor, float, int],\r\n encoder_hidden_states: torch.Tensor,\r\n class_labels: Optional[torch.Tensor] = None,\r\n timestep_cond: Optional[torch.Tensor] = None,\r\n attention_mask: Optional[torch.Tensor] = None,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\r\n mid_block_additional_residual: Optional[torch.Tensor] = None,\r\n return_dict: bool = True,\r\n ) -> Union[UNet2DConditionOutput, Tuple]:\r\n r\"\"\"\r\n Args:\r\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\r\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\r\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\r\n return_dict (`bool`, *optional*, defaults to `True`):\r\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\r\n cross_attention_kwargs (`dict`, *optional*):\r\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\r\n `self.processor` in\r\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\r\n\r\n Returns:\r\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\r\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When\r\n returning a tuple, the first element is the sample tensor.\r\n \"\"\"\r\n # By default samples have to be AT least a multiple of the overall upsampling factor.\r\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).\r\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\r\n # on the fly if necessary.\r\n default_overall_up_factor = 2**self.num_upsamplers\r\n\r\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\r\n forward_upsample_size = False\r\n upsample_size = None\r\n\r\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\r\n logger.info(\"Forward upsample size to force interpolation output size.\")\r\n forward_upsample_size = True\r\n\r\n # prepare attention_mask\r\n if attention_mask is not None:\r\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\r\n attention_mask = attention_mask.unsqueeze(1)\r\n\r\n # 0. center input if necessary\r\n if self.config.center_input_sample:\r\n sample = 2 * sample - 1.0\r\n\r\n # 1. time\r\n if isinstance(timestep, list):\r\n timesteps = timestep[0]\r\n step = len(timestep)\r\n else:\r\n timesteps = timestep\r\n step = 1\r\n if not torch.is_tensor(timesteps) and (not isinstance(timesteps,list)):\r\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\r\n # This would be a good case for the `match` statement (Python 3.10+)\r\n is_mps = sample.device.type == \"mps\"\r\n if isinstance(timestep, float):\r\n dtype = torch.float32 if is_mps else torch.float64\r\n else:\r\n dtype = torch.int32 if is_mps else torch.int64\r\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\r\n elif (not isinstance(timesteps,list)) and len(timesteps.shape) == 0:\r\n timesteps = timesteps[None].to(sample.device)\r\n \r\n if (not isinstance(timesteps,list)) and len(timesteps.shape) == 1:\r\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\r\n timesteps = timesteps.expand(sample.shape[0])\r\n elif isinstance(timesteps, list):\r\n #timesteps list, such as [981,961,941]\r\n timesteps = warpped_timestep(timesteps, sample.shape[0]).to(sample.device)\r\n t_emb = self.time_proj(timesteps)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # but time_embedding might actually be running in fp16. so we need to cast here.\r\n # there might be better ways to encapsulate this.\r\n t_emb = t_emb.to(dtype=self.dtype)\r\n\r\n emb = self.time_embedding(t_emb, timestep_cond)\r\n\r\n if self.class_embedding is not None:\r\n if class_labels is None:\r\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\r\n\r\n if self.config.class_embed_type == \"timestep\":\r\n class_labels = self.time_proj(class_labels)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # there might be better ways to encapsulate this.\r\n class_labels = class_labels.to(dtype=sample.dtype)\r\n\r\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\r\n\r\n if self.config.class_embeddings_concat:\r\n emb = torch.cat([emb, class_emb], dim=-1)\r\n else:\r\n emb = emb + class_emb\r\n\r\n if self.config.addition_embed_type == \"text\":\r\n aug_emb = self.add_embedding(encoder_hidden_states)\r\n emb = emb + aug_emb\r\n\r\n if self.time_embed_act is not None:\r\n emb = self.time_embed_act(emb)\r\n\r\n if self.encoder_hid_proj is not None:\r\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)\r\n\r\n order = self.order\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35]\r\n ipow = int(np.sqrt(9 + 8*order))\r\n if isinstance(mod, int):\r\n cond = order % mod == 0\r\n elif mod == \"pro\":\r\n cond = ipow * ipow == (9 + 8 * order)\r\n elif mod == \"50ls\":\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35,40] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls2\":\r\n cond = order in [0, 10, 11, 12, 15, 20, 25, 30,35,45] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls3\":\r\n cond = order in [0, 20, 25, 30,35,45,46,47,48,49] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls4\":\r\n cond = order in [0, 9, 13, 14, 15, 28, 29, 32, 36] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"100ls\":\r\n cond = order > 85 or order < 10 or order % 5 == 0\r\n elif mod == \"75ls\":\r\n cond = order > 65 or order < 10 or order % 5 == 0\r\n elif mod == \"75ls2\":\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35,45,55,65]\r\n elif mod == \"s2\":\r\n cond = True\r\n #===============\r\n order = self.order #timestep, start by 0\r\n #===============\r\n # if ipow*ipow == (9+8*order): #progressive skip, i.e. 
[0,2,5,...]\r\n if cond:\r\n # if order%2 == 0: # merge 2 step\r\n # print(order)\r\n # 2. pre-process\r\n sample = self.conv_in(sample)\r\n\r\n # 3. down\r\n down_block_res_samples = (sample,)\r\n for downsample_block in self.down_blocks:\r\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\r\n sample, res_samples = downsample_block(\r\n hidden_states=sample,\r\n temb=emb,\r\n encoder_hidden_states=encoder_hidden_states,\r\n attention_mask=attention_mask,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n )\r\n else:\r\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\r\n\r\n down_block_res_samples += res_samples\r\n\r\n if down_block_additional_residuals is not None:\r\n new_down_block_res_samples = ()\r\n\r\n for down_block_res_sample, down_block_additional_residual in zip(\r\n down_block_res_samples, down_block_additional_residuals\r\n ):\r\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\r\n new_down_block_res_samples += (down_block_res_sample,)\r\n\r\n down_block_res_samples = new_down_block_res_samples\r\n\r\n # 4. mid\r\n if self.mid_block is not None:\r\n sample = self.mid_block(\r\n sample,\r\n emb,\r\n encoder_hidden_states=encoder_hidden_states,\r\n attention_mask=attention_mask,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n )\r\n\r\n if mid_block_additional_residual is not None:\r\n sample = sample + mid_block_additional_residual\r\n\r\n #----------------------save feature-------------------------\r\n setattr(self, 'skip_feature', deepcopy(down_block_res_samples))\r\n setattr(self, 'toup_feature', sample.detach().clone())\r\n #-----------------------save feature------------------------\r\n\r\n\r\n\r\n #-------------------expand feature for parallel---------------\r\n # print(step)\r\n\r\n # print('pre emb shape', emb.shape)\r\n if isinstance(timestep, list):\r\n #timesteps list, such as [981,961,941]\r\n timesteps = warpped_timestep(timestep, sample.shape[0]).to(sample.device)\r\n t_emb = self.time_proj(timesteps)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # but time_embedding might actually be running in fp16. so we need to cast here.\r\n # there might be better ways to encapsulate this.\r\n t_emb = t_emb.to(dtype=self.dtype)\r\n\r\n emb = self.time_embedding(t_emb, timestep_cond)\r\n # print('post emb shape', emb.shape)\r\n\r\n # print('pre sample shape', sample.shape)\r\n # print(step, sample.shape)\r\n down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)\r\n sample = warpped_feature(sample, step)\r\n # print('post sample shape', sample.shape)\r\n\r\n # print('pre text shape', encoder_hidden_states.shape)\r\n encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)\r\n # print('post text shape', encoder_hidden_states.shape)\r\n # print('==========================')\r\n #-------------------expand feature for parallel---------------\r\n \r\n else:\r\n down_block_res_samples = self.skip_feature\r\n sample = self.toup_feature\r\n\r\n #-------------------expand feature for parallel---------------\r\n down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)\r\n sample = warpped_feature(sample, step)\r\n encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)\r\n #-------------------expand feature for parallel---------------\r\n\r\n # 5. 
up\r\n for i, upsample_block in enumerate(self.up_blocks):\r\n is_final_block = i == len(self.up_blocks) - 1\r\n\r\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\r\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\r\n\r\n # if we have not reached the final block and need to forward the\r\n # upsample size, we do it here\r\n if not is_final_block and forward_upsample_size:\r\n upsample_size = down_block_res_samples[-1].shape[2:]\r\n\r\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\r\n sample = upsample_block(\r\n hidden_states=sample,\r\n temb=emb,\r\n res_hidden_states_tuple=res_samples,\r\n encoder_hidden_states=encoder_hidden_states,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n upsample_size=upsample_size,\r\n attention_mask=attention_mask,\r\n )\r\n else:\r\n sample = upsample_block(\r\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size\r\n )\r\n\r\n # 6. post-process\r\n if self.conv_norm_out:\r\n sample = self.conv_norm_out(sample)\r\n sample = self.conv_act(sample)\r\n sample = self.conv_out(sample)\r\n\r\n if not return_dict:\r\n return (sample,)\r\n\r\n return UNet2DConditionOutput(sample=sample)\r\n return forward\r\n if model.__class__.__name__ == 'UNet2DConditionModel':\r\n model.forward = faster_forward(model)\r"
},
{
"identifier": "seed_everything",
"path": "utils_if.py",
"snippet": "def seed_everything(seed):\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed(seed)\r\n random.seed(seed)\r\n np.random.seed(seed)\r"
}
] | from diffusers import DiffusionPipeline, IFPipeline, IFSuperResolutionPipeline, StableDiffusionUpscalePipeline
from diffusers.utils import pt_to_pil
from diffusers import DPMSolverMultistepScheduler
from utils_if import register_if1, register_if2, register_if3, register_faster_forward, seed_everything
import torch
| 9,980 |
seed_everything(2023)
prompt = "a lone sailboat drifting on calm waters"
stage_1 = DiffusionPipeline.from_pretrained(
"DeepFloyd/IF-I-XL-v1.0",
variant="fp16",
torch_dtype=torch.float16,
).to('cuda')
stage_2 = DiffusionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0",
text_encoder=None,
variant="fp16",
torch_dtype=torch.float16,
).to('cuda')
# stage 3
safety_modules = {
"feature_extractor": stage_1.feature_extractor,
"safety_checker": None,
"watermarker": stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-x4-upscaler",
**safety_modules,
torch_dtype=torch.float16
).to('cuda')
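# Editor's note: the calls below patch the pipelines for faster sampling. The `mod`
# argument picks the schedule of "key" timesteps on which the UNet encoder
# (down/mid blocks) is fully re-run; on the remaining timesteps the cached skip/mid
# features are reused and several timesteps are processed together
# (see the register_faster_forward snippet above). '100ls' and 's2' are two of the
# predefined schedules.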
register_faster_forward(stage_1.unet, mod = '100ls')
register_if1(stage_1)
register_faster_forward(stage_2.unet, mod = 's2')
register_if2(stage_2)
|
| register_if3(stage_3)
| 2 | 2023-12-15 05:03:37+00:00 | 12k |
FoundationVision/GLEE | app/GLEE/glee/models/transformer_decoder/maskdino_decoder.py | [
{
"identifier": "TransformerDecoder",
"path": "app/GLEE/glee/models/transformer_decoder/dino_decoder.py",
"snippet": "class TransformerDecoder(nn.Module):\r\n\r\n def __init__(self, decoder_layer, num_layers, norm=None,\r\n return_intermediate=False,\r\n d_model=256, query_dim=4,\r\n modulate_hw_attn=True,\r\n num_feature_levels=1,\r\n deformable_decoder=True,\r\n decoder_query_perturber=None,\r\n dec_layer_number=None, # number of queries each layer in decoder\r\n rm_dec_query_scale=True,\r\n dec_layer_share=False,\r\n dec_layer_dropout_prob=None,\r\n cross_track_layer = False,\r\n n_levels = None, \r\n n_heads = None, \r\n n_points = None,\r\n ):\r\n super().__init__()\r\n if num_layers > 0:\r\n self.layers = _get_clones(decoder_layer, num_layers, layer_share=dec_layer_share)\r\n else:\r\n self.layers = []\r\n self.num_layers = num_layers\r\n self.norm = norm\r\n self.return_intermediate = return_intermediate\r\n assert return_intermediate, \"support return_intermediate only\"\r\n self.query_dim = query_dim\r\n assert query_dim in [2, 4], \"query_dim should be 2/4 but {}\".format(query_dim)\r\n self.num_feature_levels = num_feature_levels\r\n\r\n self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)\r\n if not deformable_decoder:\r\n self.query_pos_sine_scale = MLP(d_model, d_model, d_model, 2)\r\n else:\r\n self.query_pos_sine_scale = None\r\n\r\n if rm_dec_query_scale:\r\n self.query_scale = None\r\n else:\r\n raise NotImplementedError\r\n self.query_scale = MLP(d_model, d_model, d_model, 2)\r\n self.bbox_embed = None\r\n self.class_embed = None\r\n\r\n self.d_model = d_model\r\n self.modulate_hw_attn = modulate_hw_attn\r\n self.deformable_decoder = deformable_decoder\r\n\r\n if not deformable_decoder and modulate_hw_attn:\r\n self.ref_anchor_head = MLP(d_model, d_model, 2, 2)\r\n else:\r\n self.ref_anchor_head = None\r\n\r\n self.decoder_query_perturber = decoder_query_perturber\r\n self.box_pred_damping = None\r\n\r\n self.dec_layer_number = dec_layer_number\r\n if dec_layer_number is not None:\r\n assert isinstance(dec_layer_number, list)\r\n assert len(dec_layer_number) == num_layers\r\n # assert dec_layer_number[0] ==\r\n\r\n self.dec_layer_dropout_prob = dec_layer_dropout_prob\r\n if dec_layer_dropout_prob is not None:\r\n assert isinstance(dec_layer_dropout_prob, list)\r\n assert len(dec_layer_dropout_prob) == num_layers\r\n for i in dec_layer_dropout_prob:\r\n assert 0.0 <= i <= 1.0\r\n if cross_track_layer: # add a cross-attention-layer before track ffn head\r\n self.cross_track_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\r\n self.cross_track = True\r\n else:\r\n self.cross_track = False\r\n\r\n self._reset_parameters()\r\n\r\n def _reset_parameters(self):\r\n for p in self.parameters():\r\n if p.dim() > 1:\r\n nn.init.xavier_uniform_(p)\r\n for m in self.modules():\r\n if isinstance(m, MSDeformAttn):\r\n m._reset_parameters()\r\n @staticmethod\r\n def with_pos_embed(tensor, pos):\r\n return tensor if pos is None else tensor + pos\r\n\r\n\r\n def forward(self, tgt, memory,\r\n tgt_mask: Optional[Tensor] = None,\r\n memory_mask: Optional[Tensor] = None,\r\n tgt_key_padding_mask: Optional[Tensor] = None,\r\n memory_key_padding_mask: Optional[Tensor] = None,\r\n pos: Optional[Tensor] = None,\r\n refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2\r\n # for memory\r\n level_start_index: Optional[Tensor] = None, # num_levels\r\n spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\r\n valid_ratios: Optional[Tensor] = None,\r\n task = None,\r\n extra = None,\r\n\r\n ):\r\n \"\"\"\r\n Input:\r\n - tgt: nq, bs, d_model\r\n - memory: 
hw, bs, d_model\r\n - pos: hw, bs, d_model\r\n - refpoints_unsigmoid: nq, bs, 2/4\r\n - valid_ratios/spatial_shapes: bs, nlevel, 2\r\n \"\"\"\r\n output = tgt\r\n device = tgt.device\r\n\r\n intermediate = []\r\n reference_points = refpoints_unsigmoid.sigmoid().to(device)\r\n ref_points = [reference_points]\r\n\r\n for layer_id, layer in enumerate(self.layers):\r\n # preprocess ref points\r\n if self.training and self.decoder_query_perturber is not None and layer_id != 0:\r\n reference_points = self.decoder_query_perturber(reference_points)\r\n\r\n reference_points_input = reference_points[:, :, None] \\\r\n * torch.cat([valid_ratios, valid_ratios], -1)[None, :] # nq, bs, nlevel, 4\r\n query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2\r\n\r\n raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256\r\n pos_scale = self.query_scale(output) if self.query_scale is not None else 1\r\n query_pos = pos_scale * raw_query_pos\r\n\r\n output = layer(\r\n tgt=output,\r\n tgt_query_pos=query_pos,\r\n tgt_query_sine_embed=query_sine_embed,\r\n tgt_key_padding_mask=tgt_key_padding_mask,\r\n tgt_reference_points=reference_points_input,\r\n\r\n memory=memory,\r\n memory_key_padding_mask=memory_key_padding_mask,\r\n memory_level_start_index=level_start_index,\r\n memory_spatial_shapes=spatial_shapes,\r\n memory_pos=pos,\r\n\r\n self_attn_mask=tgt_mask,\r\n cross_attn_mask=memory_mask,\r\n task = task,\r\n extra = extra,\r\n layer_id = layer_id,\r\n )\r\n\r\n # iter update\r\n if self.bbox_embed is not None:\r\n reference_before_sigmoid = inverse_sigmoid(reference_points)\r\n delta_unsig = self.bbox_embed[layer_id](output).to(device)\r\n outputs_unsig = delta_unsig + reference_before_sigmoid\r\n new_reference_points = outputs_unsig.sigmoid()\r\n\r\n reference_points = new_reference_points.detach()\r\n # if layer_id != self.num_layers - 1:\r\n ref_points.append(new_reference_points)\r\n\r\n intermediate.append(self.norm(output))\r\n\r\n\r\n if self.cross_track:\r\n tgt_track = self.cross_track_attn(self.with_pos_embed(output, query_pos).transpose(0, 1),\r\n reference_points_input.transpose(0, 1).contiguous(),\r\n memory.transpose(0, 1), spatial_shapes, level_start_index,\r\n memory_key_padding_mask).transpose(0, 1)\r\n tgt_track = tgt_track + output\r\n tgt_track = tgt_track.transpose(0, 1)\r\n else:\r\n tgt_track = None\r\n\r\n return [\r\n [itm_out.transpose(0, 1) for itm_out in intermediate],\r\n [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points], tgt_track\r\n ]\r"
},
{
"identifier": "DeformableTransformerDecoderLayer",
"path": "app/GLEE/glee/models/transformer_decoder/dino_decoder.py",
"snippet": "class DeformableTransformerDecoderLayer(nn.Module):\r\n\r\n def __init__(self, d_model=256, d_ffn=1024,\r\n dropout=0.1, activation=\"relu\",\r\n n_levels=4, n_heads=8, n_points=4,\r\n use_deformable_box_attn=False,\r\n key_aware_type=None,\r\n ):\r\n super().__init__()\r\n self.n_heads = n_heads\r\n # cross attention\r\n if use_deformable_box_attn:\r\n raise NotImplementedError\r\n else:\r\n self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.norm1 = nn.LayerNorm(d_model)\r\n\r\n # self attention\r\n self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\r\n self.dropout2 = nn.Dropout(dropout)\r\n self.norm2 = nn.LayerNorm(d_model)\r\n\r\n # ffn\r\n self.linear1 = nn.Linear(d_model, d_ffn)\r\n self.activation = _get_activation_fn(activation)\r\n self.dropout3 = nn.Dropout(dropout)\r\n self.linear2 = nn.Linear(d_ffn, d_model)\r\n self.dropout4 = nn.Dropout(dropout)\r\n self.norm3 = nn.LayerNorm(d_model)\r\n\r\n self.key_aware_type = key_aware_type\r\n self.key_aware_proj = None\r\n\r\n def rm_self_attn_modules(self):\r\n self.self_attn = None\r\n self.dropout2 = None\r\n self.norm2 = None\r\n\r\n @staticmethod\r\n def with_pos_embed(tensor, pos):\r\n return tensor if pos is None else tensor + pos\r\n\r\n def forward_ffn(self, tgt):\r\n tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\r\n tgt = tgt + self.dropout4(tgt2)\r\n tgt = self.norm3(tgt)\r\n return tgt\r\n\r\n @autocast(enabled=False)\r\n def forward(self,\r\n # for tgt\r\n tgt: Optional[Tensor], # nq, bs, d_model\r\n tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\r\n tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)\r\n tgt_key_padding_mask: Optional[Tensor] = None,\r\n tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\r\n\r\n # for memory\r\n memory: Optional[Tensor] = None, # hw, bs, d_model\r\n memory_key_padding_mask: Optional[Tensor] = None,\r\n memory_level_start_index: Optional[Tensor] = None, # num_levels\r\n memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\r\n memory_pos: Optional[Tensor] = None, # pos for memory\r\n\r\n # sa\r\n self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\r\n cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\r\n task = None,\r\n extra = None,\r\n layer_id = None,\r\n ):\r\n \"\"\"\r\n Input:\r\n - tgt/tgt_query_pos: nq, bs, d_model\r\n -\r\n \"\"\"\r\n # self attention\r\n\r\n\r\n if task in ['grounding', 'rvos'] or 'visual_prompt_tokens' in extra:\r\n if self_attn_mask is not None: # training with denoising query \r\n\r\n if 'visual_prompt_tokens' in extra: # has visual prompt \r\n level_index = layer_id % 3 # src level : self.num_feature_levels\r\n prompt_tokens = extra['visual_prompt_tokens'][level_index]\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['visual_prompt_nonzero_mask'][level_index]\r\n else: #grounding\r\n prompt_tokens = extra['grounding_tokens']\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['grounding_nonzero_mask']\r\n ori_size = tgt.shape[0]\r\n new_mask_size = tgt.shape[0]+prompt_tokens.shape[0]\r\n new_self_attn_mask = torch.zeros((tgt.shape[1], new_mask_size, new_mask_size), dtype=torch.bool, device=tgt.device)\r\n \r\n new_self_attn_mask[:,:ori_size,:ori_size] = self_attn_mask.unsqueeze(0).repeat(tgt.shape[1],1,1) #denoising matching keepmask\r\n\r\n # prompt to prompt mask set to True if 
they are not valid\r\n # new_self_attn_mask[:,ori_size:,ori_size:][prompt_mask] = True\r\n # new_self_attn_mask[:,ori_size:,ori_size:].transpose(1,2)[prompt_mask] = True\r\n\r\n # prompt2obj and obj2prompt mask set to True \r\n # new_self_attn_mask[:,ori_size-300:ori_size,ori_size:][] = True \r\n new_self_attn_mask[:,:ori_size,ori_size:].transpose(1,2)[prompt_mask] = True \r\n \r\n new_self_attn_mask[:,ori_size:,:ori_size][prompt_mask] = True \r\n # new_self_attn_mask[:,ori_size:,ori_size-300:ori_size].transpose(1,2)[] = True \r\n\r\n new_self_attn_mask = new_self_attn_mask.repeat_interleave(self.n_heads, dim=0)\r\n else: # with out denoising query\r\n if 'visual_prompt_tokens' in extra: # has visual prompt \r\n level_index = layer_id % 3 # src level : self.num_feature_levels\r\n prompt_tokens = extra['visual_prompt_tokens'][level_index]\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['visual_prompt_nonzero_mask'][level_index]\r\n else: #grounding\r\n prompt_tokens = extra['grounding_tokens']\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['grounding_nonzero_mask']\r\n ori_size = tgt.shape[0]\r\n new_mask_size = tgt.shape[0]+prompt_tokens.shape[0]\r\n new_self_attn_mask = torch.zeros((tgt.shape[1], new_mask_size, new_mask_size), dtype=torch.bool, device=tgt.device)\r\n new_self_attn_mask[:,:ori_size,ori_size:].transpose(1,2)[prompt_mask] = True \r\n new_self_attn_mask[:,ori_size:,:ori_size][prompt_mask] = True \r\n new_self_attn_mask = new_self_attn_mask.repeat_interleave(self.n_heads, dim=0)\r\n\r\n\r\n if self.self_attn is not None:\r\n tgt = torch.cat([tgt,prompt_tokens],dim=0)\r\n tgt_query_pos = torch.cat([tgt_query_pos,promot_pos],dim=0)\r\n q = k = self.with_pos_embed(tgt, tgt_query_pos)\r\n tgt2 = self.self_attn(q, k, tgt, attn_mask=new_self_attn_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt = self.norm2(tgt)\r\n tgt = tgt[:ori_size]\r\n tgt_query_pos = tgt_query_pos[:ori_size]\r\n else:\r\n if self.self_attn is not None:\r\n q = k = self.with_pos_embed(tgt, tgt_query_pos)\r\n tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt = self.norm2(tgt)\r\n\r\n # cross attention\r\n if self.key_aware_type is not None:\r\n if self.key_aware_type == 'mean':\r\n tgt = tgt + memory.mean(0, keepdim=True)\r\n elif self.key_aware_type == 'proj_mean':\r\n tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)\r\n else:\r\n raise NotImplementedError(\"Unknown key_aware_type: {}\".format(self.key_aware_type))\r\n tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\r\n tgt_reference_points.transpose(0, 1).contiguous(),\r\n memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index,\r\n memory_key_padding_mask).transpose(0, 1)\r\n tgt = tgt + self.dropout1(tgt2)\r\n tgt = self.norm1(tgt)\r\n\r\n # ffn\r\n tgt = self.forward_ffn(tgt)\r\n\r\n return tgt\r"
},
{
"identifier": "MLP",
"path": "app/GLEE/glee/utils/utils.py",
"snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x"
},
{
"identifier": "gen_encoder_output_proposals",
"path": "app/GLEE/glee/utils/utils.py",
"snippet": "def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor):\n \"\"\"\n Input:\n - memory: bs, \\sum{hw}, d_model\n - memory_padding_mask: bs, \\sum{hw}\n - spatial_shapes: nlevel, 2\n Output:\n - output_memory: bs, \\sum{hw}, d_model\n - output_proposals: bs, \\sum{hw}, 4\n \"\"\"\n N_, S_, C_ = memory.shape\n base_scale = 4.0\n proposals = []\n _cur = 0\n for lvl, (H_, W_) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)\n proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)\n proposals.append(proposal)\n _cur += (H_ * W_)\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n return output_memory, output_proposals"
},
{
"identifier": "inverse_sigmoid",
"path": "app/GLEE/glee/utils/utils.py",
"snippet": "def inverse_sigmoid(x, eps=1e-5):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)"
},
{
"identifier": "box_ops",
"path": "app/GLEE/glee/utils/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_xywh_to_xyxy(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef masks_to_boxes(masks):"
}
] | import logging
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.utils.registry import Registry
from detectron2.structures import BitMasks
from timm.models.layers import trunc_normal_
from .dino_decoder import TransformerDecoder, DeformableTransformerDecoderLayer
from ...utils.utils import MLP, gen_encoder_output_proposals, inverse_sigmoid
from ...utils import box_ops | 8,017 | }
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
# init decoder
self.decoder_norm = decoder_norm = nn.LayerNorm(hidden_dim)
decoder_layer = DeformableTransformerDecoderLayer(hidden_dim, dim_feedforward,
dropout, activation,
self.num_feature_levels, nhead, dec_n_points)
self.decoder = TransformerDecoder(decoder_layer, self.num_layers, decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=hidden_dim, query_dim=query_dim,
num_feature_levels=self.num_feature_levels,
dec_layer_share=dec_layer_share,
cross_track_layer = cross_track_layer,
n_levels=self.num_feature_levels, n_heads=nhead, n_points=dec_n_points
)
self.cross_track_layer = cross_track_layer
self.hidden_dim = hidden_dim
self._bbox_embed = _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
        box_embed_layerlist = [_bbox_embed for i in range(self.num_layers)]  # share the box prediction head across decoder layers
self.bbox_embed = nn.ModuleList(box_embed_layerlist)
self.decoder.bbox_embed = self.bbox_embed
@classmethod
def from_config(cls, cfg, in_channels, lang_encoder, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["lang_encoder"] = lang_encoder
ret["mask_classification"] = mask_classification
ret["dim_projection"] = cfg.MODEL.DIM_PROJ
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MaskDINO.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MaskDINO.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MaskDINO.NHEADS
ret["dim_feedforward"] = cfg.MODEL.MaskDINO.DIM_FEEDFORWARD
ret["dec_layers"] = cfg.MODEL.MaskDINO.DEC_LAYERS
ret["enforce_input_project"] = cfg.MODEL.MaskDINO.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["two_stage"] =cfg.MODEL.MaskDINO.TWO_STAGE
ret["initialize_box_type"] = cfg.MODEL.MaskDINO.INITIALIZE_BOX_TYPE # ['no', 'bitmask', 'mask2box']
ret["dn"]=cfg.MODEL.MaskDINO.DN
ret["noise_scale"] =cfg.MODEL.MaskDINO.DN_NOISE_SCALE
ret["dn_num"] =cfg.MODEL.MaskDINO.DN_NUM
ret["initial_pred"] =cfg.MODEL.MaskDINO.INITIAL_PRED
ret["learn_tgt"] = cfg.MODEL.MaskDINO.LEARN_TGT
ret["total_num_feature_levels"] = cfg.MODEL.SEM_SEG_HEAD.TOTAL_NUM_FEATURE_LEVELS
ret["semantic_ce_loss"] = cfg.MODEL.MaskDINO.TEST.SEMANTIC_ON and cfg.MODEL.MaskDINO.SEMANTIC_CE_LOSS and ~cfg.MODEL.MaskDINO.TEST.PANOPTIC_ON
ret["cross_track_layer"] = cfg.MODEL.CROSS_TRACK
return ret
def prepare_for_dn(self, targets, tgt, refpoint_emb, batch_size,task):
"""
modified from dn-detr. You can refer to dn-detr
https://github.com/IDEA-Research/DN-DETR/blob/main/models/dn_dab_deformable_detr/dn_components.py
for more details
:param dn_args: scalar, noise_scale
:param tgt: original tgt (content) in the matching part
:param refpoint_emb: positional anchor queries in the matching part
:param batch_size: bs
"""
if self.training:
scalar, noise_scale = self.dn_num,self.noise_scale
known = [(torch.ones_like(t['labels'])).cuda() for t in targets]
know_idx = [torch.nonzero(t) for t in known]
known_num = [sum(k) for k in known]
            # use a fixed number of dn queries
if max(known_num)>0:
scalar = scalar//(int(max(known_num)))
else:
scalar = 0
if scalar == 0:
input_query_label = None
input_query_bbox = None
attn_mask = None
mask_dict = None
return input_query_label, input_query_bbox, attn_mask, mask_dict
            # can be modified to selectively denoise some labels or boxes; also supports known-label prediction
unmask_bbox = unmask_label = torch.cat(known)
labels = torch.cat([t['labels'] for t in targets])
boxes = torch.cat([t['boxes'] for t in targets])
batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])
# known
known_indice = torch.nonzero(unmask_label + unmask_bbox)
known_indice = known_indice.view(-1)
# noise
known_indice = known_indice.repeat(scalar, 1).view(-1)
known_labels = labels.repeat(scalar, 1).view(-1)
known_bid = batch_idx.repeat(scalar, 1).view(-1)
known_bboxs = boxes.repeat(scalar, 1)
known_labels_expaned = known_labels.clone()
known_bbox_expand = known_bboxs.clone()
# noise on the label
if noise_scale > 0:
p = torch.rand_like(known_labels_expaned.float())
chosen_indice = torch.nonzero(p < (noise_scale * 0.5)).view(-1) # half of bbox prob
new_label = torch.randint_like(chosen_indice, 0, self.num_classes[task]) # randomly put a new one here
known_labels_expaned.scatter_(0, chosen_indice, new_label)
if noise_scale > 0:
diff = torch.zeros_like(known_bbox_expand)
diff[:, :2] = known_bbox_expand[:, 2:] / 2
diff[:, 2:] = known_bbox_expand[:, 2:]
known_bbox_expand += torch.mul((torch.rand_like(known_bbox_expand) * 2 - 1.0),
diff).cuda() * noise_scale
known_bbox_expand = known_bbox_expand.clamp(min=0.0, max=1.0)
m = known_labels_expaned.long().to('cuda')
input_label_embed = self.label_enc[task](m)
| # ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from Mask2Former https://github.com/facebookresearch/Mask2Former by Feng Li and Hao Zhang.
TRANSFORMER_DECODER_REGISTRY = Registry("TRANSFORMER_MODULE")
TRANSFORMER_DECODER_REGISTRY.__doc__ = """
Registry for transformer modules in MaskDINO.
"""
def build_transformer_decoder(cfg, in_channels, lang_encoder, mask_classification=True):
"""
    Build a transformer decoder from `cfg.MODEL.MaskDINO.TRANSFORMER_DECODER_NAME`.
"""
name = cfg.MODEL.MaskDINO.TRANSFORMER_DECODER_NAME
return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, lang_encoder, mask_classification)
@TRANSFORMER_DECODER_REGISTRY.register()
class MaskDINODecoder(nn.Module):
@configurable
def __init__(
self,
in_channels,
lang_encoder,
mask_classification=True,
*,
num_classes: int,
hidden_dim: int,
num_queries: int,
nheads: int,
dim_feedforward: int,
dec_layers: int,
mask_dim: int,
dim_projection: int,
enforce_input_project: bool,
two_stage: bool,
dn: str,
noise_scale:float,
dn_num:int,
initialize_box_type:bool,
initial_pred:bool,
learn_tgt: bool,
total_num_feature_levels: int = 4,
dropout: float = 0.0,
activation: str = 'relu',
nhead: int = 8,
dec_n_points: int = 4,
return_intermediate_dec: bool = True,
query_dim: int = 4,
dec_layer_share: bool = False,
semantic_ce_loss: bool = False,
cross_track_layer: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
in_channels: channels of the input features
mask_classification: whether to add mask classifier or not
num_classes: number of classes
hidden_dim: Transformer feature dimension
num_queries: number of queries
nheads: number of heads
dim_feedforward: feature dimension in feedforward network
enc_layers: number of Transformer encoder layers
dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
mask_dim: mask feature dimension
enforce_input_project: add input project 1x1 conv even if input
                channels and hidden dim are identical
d_model: transformer dimension
dropout: dropout rate
activation: activation function
nhead: num heads in multi-head attention
dec_n_points: number of sampling points in decoder
return_intermediate_dec: return the intermediate results of decoder
query_dim: 4 -> (x, y, w, h)
dec_layer_share: whether to share each decoder layer
semantic_ce_loss: use ce loss for semantic segmentation
"""
super().__init__()
assert mask_classification, "Only support mask classification model"
self.mask_classification = mask_classification
self.num_feature_levels = total_num_feature_levels
self.initial_pred = initial_pred
self.lang_encoder = lang_encoder
# define Transformer decoder here
self.dn=dn
self.learn_tgt = learn_tgt
self.noise_scale=noise_scale
self.dn_num=dn_num
self.num_heads = nheads
self.num_layers = dec_layers
self.two_stage=two_stage
self.initialize_box_type = initialize_box_type
self.total_num_feature_levels = total_num_feature_levels
self.num_queries = num_queries
self.semantic_ce_loss = semantic_ce_loss
# learnable query features
if not two_stage or self.learn_tgt:
self.query_feat = nn.Embedding(num_queries, hidden_dim)
if not two_stage and initialize_box_type == 'no':
self.query_embed = nn.Embedding(num_queries, 4)
if two_stage:
self.enc_output = nn.Linear(hidden_dim, hidden_dim)
self.enc_output_norm = nn.LayerNorm(hidden_dim)
self.input_proj = nn.ModuleList()
for _ in range(self.num_feature_levels):
if in_channels != hidden_dim or enforce_input_project:
self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
weight_init.c2_xavier_fill(self.input_proj[-1])
else:
self.input_proj.append(nn.Sequential())
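        # per-task/dataset category counts; used in prepare_for_dn when sampling random noisy labels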
self.num_classes = {
'obj365':100,
'obj365_clip':100,
'lvis':100,
'openimage':100,
'lvis_clip':100,
'openimage_clip':100,
'grit':100,
'vg':200,
'coco':80,
'coco_clip':80,
'grounding':1,
'rvos':1,
'sa1b':1,
'sa1b_clip':1,
'bdd_det':10,
'bdd_inst':8,
'ytvis19':40,
'image_yt19':40,
'image_yt21':40,
'bdd_track_seg':8,
'bdd_track_box':8,
'ovis':25,
'image_o':25,
'ytvis21':40,
'uvo_video': 81,
'ytbvos':1,
}
# output FFNs
assert self.mask_classification, "why not class embedding?"
self.confidence_score = MLP(hidden_dim, hidden_dim, 1, 2)
self.category_embed = nn.Parameter(torch.rand(hidden_dim, dim_projection))
# trunc_normal_(self.category_embed, std=.02)
# self.track_embed = MLP(hidden_dim, hidden_dim, hidden_dim, 3)
self.coco_label_enc = nn.Embedding(80,hidden_dim)
self.obj365_label_enc = nn.Embedding(100, hidden_dim)
self.vg_label_enc = nn.Embedding(200, hidden_dim)
self.grounding_label_enc = nn.Embedding(1,hidden_dim)
self.ytvis19_label_enc = nn.Embedding(40,hidden_dim)
self.ytvis21_label_enc = nn.Embedding(40,hidden_dim)
self.ovis_label_enc = nn.Embedding(25,hidden_dim)
self.uvo_label_enc = nn.Embedding(81,hidden_dim)
self.bdd_det = nn.Embedding(10,hidden_dim)
self.bdd_inst = nn.Embedding(8,hidden_dim)
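        # map each task/dataset name to its label embedding table (related datasets share the same table)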
self.label_enc = {
'coco': self.coco_label_enc,
'coco_clip': self.coco_label_enc,
'coconomask': self.coco_label_enc,
'obj365': self.obj365_label_enc,
'lvis': self.obj365_label_enc,
'openimage': self.obj365_label_enc,
'grit': self.obj365_label_enc,
'vg': self.vg_label_enc,
'obj365_clip': self.obj365_label_enc,
'lvis_clip': self.obj365_label_enc,
'openimage_clip': self.obj365_label_enc,
'bdd_det':self.bdd_det,
'bdd_inst':self.bdd_inst,
'bdd_track_seg':self.bdd_inst,
'bdd_track_box':self.bdd_inst,
'sa1b': self.grounding_label_enc,
'sa1b_clip': self.grounding_label_enc,
'grounding': self.grounding_label_enc,
'rvos': self.grounding_label_enc,
'uvo_video':self.uvo_label_enc,
'ytvis19':self.ytvis19_label_enc,
'image_yt19': self.ytvis19_label_enc,
'ytvis21':self.ytvis21_label_enc,
'image_yt21':self.ytvis21_label_enc,
'ovis':self.ovis_label_enc,
'image_o': self.ovis_label_enc,
'burst':self.grounding_label_enc,
'ytbvos':self.grounding_label_enc,
}
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
# init decoder
self.decoder_norm = decoder_norm = nn.LayerNorm(hidden_dim)
decoder_layer = DeformableTransformerDecoderLayer(hidden_dim, dim_feedforward,
dropout, activation,
self.num_feature_levels, nhead, dec_n_points)
self.decoder = TransformerDecoder(decoder_layer, self.num_layers, decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=hidden_dim, query_dim=query_dim,
num_feature_levels=self.num_feature_levels,
dec_layer_share=dec_layer_share,
cross_track_layer = cross_track_layer,
n_levels=self.num_feature_levels, n_heads=nhead, n_points=dec_n_points
)
self.cross_track_layer = cross_track_layer
self.hidden_dim = hidden_dim
self._bbox_embed = _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
        box_embed_layerlist = [_bbox_embed for i in range(self.num_layers)]  # share the box prediction head across decoder layers
self.bbox_embed = nn.ModuleList(box_embed_layerlist)
self.decoder.bbox_embed = self.bbox_embed
@classmethod
def from_config(cls, cfg, in_channels, lang_encoder, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["lang_encoder"] = lang_encoder
ret["mask_classification"] = mask_classification
ret["dim_projection"] = cfg.MODEL.DIM_PROJ
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MaskDINO.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MaskDINO.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MaskDINO.NHEADS
ret["dim_feedforward"] = cfg.MODEL.MaskDINO.DIM_FEEDFORWARD
ret["dec_layers"] = cfg.MODEL.MaskDINO.DEC_LAYERS
ret["enforce_input_project"] = cfg.MODEL.MaskDINO.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["two_stage"] =cfg.MODEL.MaskDINO.TWO_STAGE
ret["initialize_box_type"] = cfg.MODEL.MaskDINO.INITIALIZE_BOX_TYPE # ['no', 'bitmask', 'mask2box']
ret["dn"]=cfg.MODEL.MaskDINO.DN
ret["noise_scale"] =cfg.MODEL.MaskDINO.DN_NOISE_SCALE
ret["dn_num"] =cfg.MODEL.MaskDINO.DN_NUM
ret["initial_pred"] =cfg.MODEL.MaskDINO.INITIAL_PRED
ret["learn_tgt"] = cfg.MODEL.MaskDINO.LEARN_TGT
ret["total_num_feature_levels"] = cfg.MODEL.SEM_SEG_HEAD.TOTAL_NUM_FEATURE_LEVELS
ret["semantic_ce_loss"] = cfg.MODEL.MaskDINO.TEST.SEMANTIC_ON and cfg.MODEL.MaskDINO.SEMANTIC_CE_LOSS and ~cfg.MODEL.MaskDINO.TEST.PANOPTIC_ON
ret["cross_track_layer"] = cfg.MODEL.CROSS_TRACK
return ret
def prepare_for_dn(self, targets, tgt, refpoint_emb, batch_size,task):
"""
modified from dn-detr. You can refer to dn-detr
https://github.com/IDEA-Research/DN-DETR/blob/main/models/dn_dab_deformable_detr/dn_components.py
for more details
:param dn_args: scalar, noise_scale
:param tgt: original tgt (content) in the matching part
:param refpoint_emb: positional anchor queries in the matching part
:param batch_size: bs
"""
if self.training:
scalar, noise_scale = self.dn_num,self.noise_scale
known = [(torch.ones_like(t['labels'])).cuda() for t in targets]
know_idx = [torch.nonzero(t) for t in known]
known_num = [sum(k) for k in known]
            # use a fixed number of dn queries
if max(known_num)>0:
scalar = scalar//(int(max(known_num)))
else:
scalar = 0
if scalar == 0:
input_query_label = None
input_query_bbox = None
attn_mask = None
mask_dict = None
return input_query_label, input_query_bbox, attn_mask, mask_dict
            # can be modified to selectively denoise some labels or boxes; also supports known-label prediction
unmask_bbox = unmask_label = torch.cat(known)
labels = torch.cat([t['labels'] for t in targets])
boxes = torch.cat([t['boxes'] for t in targets])
batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])
# known
known_indice = torch.nonzero(unmask_label + unmask_bbox)
known_indice = known_indice.view(-1)
# noise
known_indice = known_indice.repeat(scalar, 1).view(-1)
known_labels = labels.repeat(scalar, 1).view(-1)
known_bid = batch_idx.repeat(scalar, 1).view(-1)
known_bboxs = boxes.repeat(scalar, 1)
known_labels_expaned = known_labels.clone()
known_bbox_expand = known_bboxs.clone()
# noise on the label
if noise_scale > 0:
p = torch.rand_like(known_labels_expaned.float())
chosen_indice = torch.nonzero(p < (noise_scale * 0.5)).view(-1) # half of bbox prob
new_label = torch.randint_like(chosen_indice, 0, self.num_classes[task]) # randomly put a new one here
known_labels_expaned.scatter_(0, chosen_indice, new_label)
if noise_scale > 0:
diff = torch.zeros_like(known_bbox_expand)
diff[:, :2] = known_bbox_expand[:, 2:] / 2
diff[:, 2:] = known_bbox_expand[:, 2:]
known_bbox_expand += torch.mul((torch.rand_like(known_bbox_expand) * 2 - 1.0),
diff).cuda() * noise_scale
known_bbox_expand = known_bbox_expand.clamp(min=0.0, max=1.0)
m = known_labels_expaned.long().to('cuda')
input_label_embed = self.label_enc[task](m) | input_bbox_embed = inverse_sigmoid(known_bbox_expand) | 4 | 2023-12-15 01:12:36+00:00 | 12k |
SHI-Labs/VCoder | vcoder_llava/train/vcoder_ds_train.py | [
{
"identifier": "IGNORE_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "DEFAULT_SEG_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_SEG_TOKEN = \"<seg>\""
},
{
"identifier": "DEFAULT_DEPTH_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_DEPTH_TOKEN = \"<depth>\""
},
{
"identifier": "VCoderDSLLaVATrainer",
"path": "vcoder_llava/train/vcoder_ds_llava_trainer.py",
"snippet": "class VCoderDSLLaVATrainer(Trainer):\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:\n if self.train_dataset is None or not has_length(self.train_dataset):\n return None\n\n if self.args.group_by_modality_length:\n lengths = self.train_dataset.modality_lengths\n return LengthGroupedSampler(\n # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps\n self.args.train_batch_size,\n world_size=self.args.world_size,\n lengths=lengths,\n group_by_modality=True,\n )\n else:\n return super()._get_train_sampler()\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n \"\"\"\n if is_sagemaker_mp_enabled():\n return super().create_optimizer()\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n return super().create_optimizer()\n\n opt_model = self.model\n\n if self.optimizer is None:\n decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n if self.args.depth_mm_projector_lr is not None:\n projector_parameters = [name for name, _ in opt_model.named_parameters() if \"depth_mm_projector\" in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n \"lr\": self.args.depth_mm_projector_lr,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n \"lr\": self.args.depth_mm_projector_lr,\n },\n ]\n else:\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)\n\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n self.optimizer = OSS(\n params=optimizer_grouped_parameters,\n optim=optimizer_cls,\n **optimizer_kwargs,\n )\n else:\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n if optimizer_cls.__name__ == \"Adam8bit\":\n import bitsandbytes\n\n manager = bitsandbytes.optim.GlobalOptimManager.get_instance()\n\n skipped = 0\n for module in opt_model.modules():\n if isinstance(module, nn.Embedding):\n skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())\n logger.info(f\"skipped {module}: {skipped/2**20}M params\")\n manager.register_module_override(module, \"weight\", {\"optim_bits\": 32})\n logger.debug(f\"bitsandbytes: will optimize {module} in fp32\")\n 
logger.info(f\"skipped: {skipped/2**20}M params\")\n\n return self.optimizer\n \n def _save_checkpoint(self, model, trial, metrics=None):\n super(VCoderDSLLaVATrainer, self)._save_checkpoint(model, trial, metrics)\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n super(VCoderDSLLaVATrainer, self)._save(output_dir, state_dict)"
},
{
"identifier": "vcoder_conversation",
"path": "vcoder_llava/vcoder_conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass VCoderConversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_segs(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_depths(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):"
},
{
"identifier": "tokenizer_image_token",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "tokenizer_seg_token",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def tokenizer_seg_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, seg_token_index=SEG_TOKEN_INDEX, return_tensors=None): \n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<seg>\\n<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n \n for x in insert_separator(prompt_chunks, [seg_token_index, image_token_index] * (offset + 1)):\n if seg_token_index in x:\n input_ids.extend(x[offset:-1])\n else:\n input_ids.extend(x[offset:])\n \n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "tokenizer_depth_seg_token",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def tokenizer_depth_seg_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, seg_token_index=SEG_TOKEN_INDEX, depth_token_index=DEPTH_TOKEN_INDEX, return_tensors=None): \n if \"<depth>\" in prompt:\n return _tokenizer_depth_token(prompt, tokenizer, image_token_index, seg_token_index, depth_token_index, return_tensors)\n else:\n return tokenizer_seg_token(prompt, tokenizer, image_token_index, seg_token_index, return_tensors)"
},
{
"identifier": "generate_qa_pairs",
"path": "vcoder_llava/data_utils.py",
"snippet": "def generate_qa_pairs(text):\n num_nouns = _obtain_nouns(text)\n qa_pairs = []\n\n for obj, count in num_nouns.items():\n # Count question\n if count == 1:\n plural_obj = p.plural(obj)\n else:\n plural_obj = obj\n count_question = f\"How many {plural_obj} are there in the image?\"\n count_answer = f\"There {'is' if count == 1 else 'are'} {num2words(count)} {obj} in the image.\"\n qa_pairs.append((count_question, count_answer))\n\n prob_positive = np.random.uniform(0,1.)\n\n if prob_positive > 0.7 or count == 1:\n numeric_presence_question = f\"{'Is' if count == 1 else 'Are'} there {num2words(count)} {obj} in the image?\"\n numeric_presence_answer = \"Yes.\"\n elif count > 1:\n numbers = [i for i in range(2, count + 6) if i != count]\n # Select a random number from the range\n cnt = random.choice(numbers)\n numeric_presence_question = f\"{'Is' if cnt == 1 else 'Are'} there {num2words(cnt)} {obj} in the image?\"\n numeric_presence_answer = \"No.\"\n \n qa_pairs.append((numeric_presence_question, numeric_presence_answer))\n random.shuffle(qa_pairs)\n\n return random.sample(qa_pairs, min(len(qa_pairs), random.choice([1, 2, 3, 4, 5, 6])))"
},
{
"identifier": "get_peft_state_maybe_zero_3",
"path": "vcoder_llava/train/train.py",
"snippet": "def get_peft_state_maybe_zero_3(named_params, bias):\n if bias == \"none\":\n to_return = {k: t for k, t in named_params if \"lora_\" in k}\n elif bias == \"all\":\n to_return = {k: t for k, t in named_params if \"lora_\" in k or \"bias\" in k}\n elif bias == \"lora_only\":\n to_return = {}\n maybe_lora_bias = {}\n lora_bias_names = set()\n for k, t in named_params:\n if \"lora_\" in k:\n to_return[k] = t\n bias_name = k.split(\"lora_\")[0] + \"bias\"\n lora_bias_names.add(bias_name)\n elif \"bias\" in k:\n maybe_lora_bias[k] = t\n for k, t in maybe_lora_bias:\n if bias_name in lora_bias_names:\n to_return[bias_name] = t\n else:\n raise NotImplementedError\n to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}\n return to_return"
},
{
"identifier": "get_peft_state_non_lora_maybe_zero_3",
"path": "vcoder_llava/train/train.py",
"snippet": "def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):\n to_return = {k: t for k, t in named_params if \"lora_\" not in k}\n if require_grad_only:\n to_return = {k: t for k, t in to_return.items() if t.requires_grad}\n to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}\n return to_return"
},
{
"identifier": "get_mm_adapter_state_maybe_zero_3",
"path": "vcoder_llava/train/train.py",
"snippet": "def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):\n to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}\n to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}\n return to_return"
},
{
"identifier": "find_all_linear_names",
"path": "vcoder_llava/train/train.py",
"snippet": "def find_all_linear_names(model):\n cls = torch.nn.Linear\n lora_module_names = set()\n multimodal_keywords = ['mm_projector', 'vision_tower', 'vision_resampler']\n for name, module in model.named_modules():\n if any(mm_keyword in name for mm_keyword in multimodal_keywords):\n continue\n if isinstance(module, cls):\n names = name.split('.')\n lora_module_names.add(names[0] if len(names) == 1 else names[-1])\n\n if 'lm_head' in lora_module_names: # needed for 16-bit\n lora_module_names.remove('lm_head')\n return list(lora_module_names)"
},
{
"identifier": "DEPTH_QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "DEPTH_QUESTIONS = [\n \"what is depth order of objects in the image?\",\n \"Can you describe the depth order of the objects in this image, from closest to farthest?\",\n \"Which objects in the image appear nearest to the viewer and which seem furthest away?\",\n \"Could you list the objects in the image in order of their perceived distance from the foreground to the background?\",\n \"In what order do the objects in this image appear based on their depth, starting from the closest?\",\n \"How would you rank the objects in this picture from the most proximal to the most distal?\",\n \"Can you arrange the objects seen here from those appearing closest to those appearing farthest?\",\n \"What is the sequence of objects in this image based on their distance from the front to the back?\",\n \"Please identify the order of objects in terms of depth perspective in this image.\",\n \"Which objects in the picture seem to be in the front, and which ones appear to be in the back?\",\n \"How are the objects in this image layered in depth, from the one nearest to the camera to the one farthest?\",\n \"Could you sort the objects in this photo from foreground to background?\",\n \"In this image, what is the spatial arrangement of objects from closest to furthest?\",\n \"Can you pinpoint the depth hierarchy of these objects, starting from the closest?\",\n \"What's the depth sequence of the objects displayed in this picture?\",\n \"From nearest to furthest, how would you order the objects in this image?\",\n \"How would you describe the spatial positioning of these objects in terms of their depth?\",\n \"Can you determine the depth placement of each object in this photo, starting with the nearest?\",\n \"What is the arrangement of objects in this scene by depth?\",\n \"Could you outline the depth profile of the objects in this image?\",\n \"In what depth order do the objects in this image align, from the frontmost to the rearmost?\",\n \"How are the objects in this image ordered in terms of their relative distance from the observer?\",\n]"
},
{
"identifier": "SEMANTIC_QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "SEMANTIC_QUESTIONS = [\n \"What objects can be seen in the image? Perceive as done for semantic segmentation.\",\n \"What items are depicted in the picture? Consider in terms of semantic segmentation.\",\n \"Which elements are present in the visual? Analyze as you would for semantic segmentation.\",\n \"Can you identify the objects in the image? Think from a semantic segmentation perspective.\",\n \"What are the components visible in the graphic? Examine as if segmenting semantically.\",\n \"Which entities can be spotted in the photo? View through the lens of semantic segmentation.\",\n \"What are the discernible objects in the snapshot? Envision in relation to semantic segmentation.\",\n \"What elements stand out in the illustration? Reflect upon it as for semantic segmentation.\",\n \"Can you spot any items within the visual representation? Contemplate in a semantic segmentation context.\",\n \"What features are evident in this visual content? Analyze with semantic segmentation in mind.\",\n \"Which objects are noticeable in the image? Think of it in terms of semantic layers.\",\n \"How would you categorize the objects in this picture? As if you're doing semantic segmentation.\",\n \"What constituents can you recognize in the image? Ponder considering semantic segmentation.\",\n \"Which components can be distinguished in the photo? Evaluate as per semantic segmentation guidelines.\",\n \"What items in the image can you point out? Interpret with a semantic segmentation approach.\",\n \"Can you enumerate the objects present in this visual? Think semantically.\",\n \"What do you observe in the graphic? Consider its semantic segments.\",\n \"How many distinct objects can you identify in the visual? Keeping semantic segmentation in perspective.\",\n \"Which items are apparent in this depiction? Assess as one would for semantic segmentation.\",\n \"What are the visible entities within this image? Delve into it semantically.\",\n \"Can you discern specific objects in the portrayal? Approach it from a semantic segmentation standpoint.\",\n]"
},
{
"identifier": "INSTANCE_QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "INSTANCE_QUESTIONS = [\n \"What objects can be seen in the image? Perceive as done for instance segmentation\",\n \"What items are visible in the picture? Analyze as you would for instance segmentation.\",\n \"Which elements are present in the visual? Consider from an instance segmentation perspective.\",\n \"What are the distinguishable objects in the image? Think in terms of instance segmentation.\",\n \"Can you identify the entities in the graphic? Approach it with instance segmentation in mind.\",\n \"What components are apparent in the photo? Examine as if performing instance segmentation.\",\n \"Which items can be detected in the snapshot? View it through the lens of instance segmentation.\",\n \"What features stand out in the illustration? Reflect upon it as for instance segmentation.\",\n \"How would you describe the objects in this image? Keeping instance segmentation as a reference.\",\n \"What constituents are evident in the visual content? Think from an instance segmentation standpoint.\",\n \"Which objects can you spot in the depiction? Evaluate as per instance segmentation guidelines.\",\n \"What do you observe in the graphic? Contemplate with instance segmentation considerations.\",\n \"Can you discern specific entities in the visual? Approach it in the context of instance segmentation.\",\n \"Which components in the image catch your eye? Think of it in relation to instance layers.\",\n \"How many distinct items can you pinpoint in the photo? With an instance segmentation approach.\",\n \"What elements are noticeable in this portrayal? Analyze while considering instance segmentation.\",\n \"Can you list the objects present in the visual representation? Reflecting on instance segmentation.\",\n \"What items in the snapshot can you recognize? Interpret with an instance segmentation perspective.\",\n \"Which entities are discernible in this depiction? Delve into it from an instance segmentation angle.\",\n \"What are the components you can spot within the image? Think instance-wise.\",\n \"Can you detail the objects in the visual? Assess as one would for instance segmentation.\",\n]"
},
{
"identifier": "PANOPTIC_QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "PANOPTIC_QUESTIONS = [\n \"What objects can be seen in the image? Perceive as done for panoptic segmentation\",\n \"What items are evident in the picture? Analyze with a panoptic segmentation perspective.\",\n \"Which elements emerge in the visual? Think in terms of panoptic segmentation.\",\n \"What are the discernible objects in the graphic? Approach it from a panoptic segmentation viewpoint.\",\n \"Can you identify the entities within the image? Consider it as you would for panoptic segmentation.\",\n \"What components stand out in the photo? Examine with panoptic segmentation in mind.\",\n \"Which items are detectable in the snapshot? Reflect upon it with panoptic segmentation considerations.\",\n \"What features can be observed in the illustration? View through the lens of panoptic segmentation.\",\n \"How would you describe the objects in this depiction? Keeping panoptic segmentation as a reference.\",\n \"What constituents are visible in the visual content? Think from a panoptic segmentation standpoint.\",\n \"Which objects can you pinpoint in the image? Evaluate as per panoptic segmentation guidelines.\",\n \"What do you perceive in the graphic? Delve into it with panoptic segmentation insights.\",\n \"Can you spot specific components in the visual? Contextualize with panoptic segmentation.\",\n \"What items in the portrayal catch your attention? Think in relation to panoptic layers.\",\n \"How many distinct entities can you recognize in the photo? With a panoptic segmentation approach.\",\n \"What elements are present in this visual? Analyze while keeping panoptic segmentation in mind.\",\n \"Can you list the objects depicted in the visual representation? Reflecting on panoptic segmentation.\",\n \"Which features in the image can you discern? Interpret considering panoptic segmentation.\",\n \"What are the components evident in this depiction? Approach it using a panoptic segmentation angle.\",\n \"What items can you detect in the visual content? Think panoptically.\",\n \"Can you detail the entities present in the image? Assess as one would when considering panoptic segmentation.\",\n]"
}
] | import os
import copy
import pathlib
import numpy as np
import random
import torch
import transformers
import json
import re
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence
from vcoder_llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_SEG_TOKEN, DEFAULT_DEPTH_TOKEN
from torch.utils.data import Dataset
from vcoder_llava.train.vcoder_ds_llava_trainer import VCoderDSLLaVATrainer
from vcoder_llava import vcoder_conversation as conversation_lib
from vcoder_llava.model import *
from vcoder_llava.mm_utils import tokenizer_image_token, tokenizer_seg_token, tokenizer_depth_seg_token
from vcoder_llava.data_utils import generate_qa_pairs
from .train import (
get_peft_state_maybe_zero_3,
get_peft_state_non_lora_maybe_zero_3,
get_mm_adapter_state_maybe_zero_3,
find_all_linear_names,
)
from vcoder_llava.questions import DEPTH_QUESTIONS, SEMANTIC_QUESTIONS, INSTANCE_QUESTIONS, PANOPTIC_QUESTIONS
from PIL import Image
from transformers import BitsAndBytesConfig
from peft import prepare_model_for_kbit_training
from peft import LoraConfig, get_peft_model
from peft.tuners.lora import LoraLayer | 8,151 | else:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
tokenizer.pad_token = tokenizer.unk_token
if model_args.version in conversation_lib.conv_templates:
conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
else:
conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"]
# vision modules
model.get_vision_tower().load_model()
data_args.image_processor = model.get_vision_tower().image_processor
data_args.is_multimodal = True
vision_tower = model.get_vision_tower()
vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)
model.config.image_aspect_ratio = data_args.image_aspect_ratio
model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
if model_args.tune_mm_mlp_adapter:
model.requires_grad_(False)
for p in model.get_model().mm_projector.parameters():
p.requires_grad = True
model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
if training_args.freeze_mm_mlp_adapter:
for p in model.get_model().mm_projector.parameters():
p.requires_grad = False
if training_args.bits in [4, 8]:
model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device)
if model_args.seg_tune_adapter is not None:
model.get_model().initialize_seg_modules(
model_args=model_args,
fsdp=training_args.fsdp
)
if model_args.seg_tune_adapter:
if getattr(model_args, "freeze_llm", False):
model.requires_grad_(False)
for p in model.get_model().seg_mm_projector.parameters():
p.requires_grad = True
for p in model.get_model().vcoder_lm_emb.parameters():
p.requires_grad = True
data_args.seg_image_processor = model.get_vision_tower().image_processor
model.config.use_mm2_proj = model_args.use_mm2_proj
model.config.mm_vcoder_lm_emb = True
model.config.seg_tune_adapter = training_args.seg_tune_adapter = model_args.seg_tune_adapter
model.config.freeze_seg_mm_mlp_adapter = training_args.freeze_seg_mm_mlp_adapter
if training_args.freeze_seg_mm_mlp_adapter:
for p in model.get_model().seg_mm_projector.parameters():
p.requires_grad = False
if model_args.use_mm2_proj:
for p in model.get_model().mm2_projector.parameters():
p.requires_grad = False
if training_args.bits in [4, 8]:
model.get_model().seg_mm_projector.to(dtype=compute_dtype, device=training_args.device)
else:
# seg modules
data_args.seg_image_processor = model.get_vision_tower().image_processor
if training_args.bits in [4, 8]:
model.get_model().seg_mm_projector.to(dtype=compute_dtype, device=training_args.device)
if model_args.depth_tune_adapter is not None:
model.get_model().initialize_depth_modules(
model_args=model_args,
fsdp=training_args.fsdp
)
if getattr(model_args, "freeze_llm", False):
model.requires_grad_(False)
for p in model.get_model().depth_mm_projector.parameters():
p.requires_grad = True
for p in model.get_model().vcoder_lm_emb.parameters():
p.requires_grad = True
if model_args.seg_tune_adapter:
for p in model.get_model().seg_mm_projector.parameters():
p.requires_grad = True
data_args.depth_image_processor = model.get_vision_tower().image_processor
model.config.depth_tune_adapter = training_args.depth_tune_adapter = model_args.depth_tune_adapter
model.config.freeze_depth_mm_mlp_adapter = training_args.freeze_depth_mm_mlp_adapter
if training_args.freeze_depth_mm_mlp_adapter:
for p in model.get_model().depth_mm_projector.parameters():
p.requires_grad = False
if training_args.bits in [4, 8]:
model.get_model().depth_mm_projector.to(dtype=compute_dtype, device=training_args.device)
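    # for 4/8-bit training: keep LoRA layers in bf16 (when bf16 is enabled), norms in fp32, and cast fp32 lm_head/embed_tokens weights to bf16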
if training_args.bits in [4, 8]:
for name, module in model.named_modules():
if isinstance(module, LoraLayer):
if training_args.bf16:
module = module.to(torch.bfloat16)
if 'norm' in name:
module = module.to(torch.float32)
if 'lm_head' in name or 'embed_tokens' in name:
if hasattr(module, 'weight'):
if training_args.bf16 and module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
data_module = make_supervised_data_module(tokenizer=tokenizer,
data_args=data_args)
| # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
local_rank = None
def rank0_print(*args):
if local_rank == 0:
print(*args)
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
version: Optional[str] = field(default="v1")
freeze_backbone: bool = field(default=False)
tune_mm_mlp_adapter: bool = field(default=False)
mm_vision_select_layer: Optional[int] = field(default=-1) # default to the last layer
mm_projector_type: Optional[str] = field(default='linear')
freeze_llm: bool = field(default=False)
use_mm2_proj: bool = field(default=False)
pretrain_mm2_mlp_adapter: Optional[str] = field(default=None)
seg_tune_adapter: bool = field(default=False)
    mm_seg_select_layer: Optional[int] = field(default=-2) # default to the second-to-last layer
seg_mm_projector_type: Optional[str] = field(default='linear')
depth_tune_adapter: bool = field(default=False)
    mm_depth_select_layer: Optional[int] = field(default=-2) # default to the second-to-last layer
depth_mm_projector_type: Optional[str] = field(default='linear')
mm_vision_select_feature: Optional[str] = field(default="patch")
mm_seg_select_feature: Optional[str] = field(default="patch")
mm_depth_select_feature: Optional[str] = field(default="patch")
@dataclass
class DataArguments:
data_path: str = field(default=None,
metadata={"help": "Path to the training data."})
depth_data_path: str = field(default=None,
metadata={"help": "Path to the seg training data."})
lazy_preprocess: bool = False
is_multimodal: bool = False
image_folder: Optional[str] = field(default=None)
seg_image_folder: Optional[str] = field(default=None)
depth_image_folder: Optional[str] = field(default=None)
image_aspect_ratio: str = 'square'
image_grid_pinpoints: Optional[str] = field(default=None)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
remove_unused_columns: bool = field(default=False)
freeze_mm_mlp_adapter: bool = field(default=False)
freeze_seg_mm_mlp_adapter: bool = field(default=False)
freeze_depth_mm_mlp_adapter: bool = field(default=False)
mpt_attn_impl: Optional[str] = field(default="triton")
model_max_length: int = field(
default=512,
metadata={
"help":
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
double_quant: bool = field(
default=True,
metadata={"help": "Compress the quantization statistics through double quantization."}
)
quant_type: str = field(
default="nf4",
metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
)
bits: int = field(
default=16,
metadata={"help": "How many bits to use."}
)
lora_enable: bool = False
lora_r: int = 64
lora_alpha: int = 16
lora_dropout: float = 0.05
lora_weight_path: str = ""
lora_bias: str = "none"
group_by_modality_length: bool = field(default=False)
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
output_dir: str):
"""Collects the state dict and dump to disk."""
if trainer.deepspeed:
torch.cuda.synchronize()
trainer.save_model(output_dir)
return
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {
key: value.cpu()
for key, value in state_dict.items()
}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
def depth_seg_preprocess_v1(
sources,
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False,
has_seg: bool = False,
has_depth: bool = False,
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
if has_image and has_seg:
if has_depth:
input_ids = torch.stack([tokenizer_depth_seg_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
else:
input_ids = torch.stack([tokenizer_seg_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
elif has_image:
input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
else:
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.TWO
# Mask targets
sep = conv.sep + conv.roles[1] + ": "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
if has_image and has_seg:
if has_depth:
round_len = len(tokenizer_depth_seg_token(rou, tokenizer))
instruction_len = len(tokenizer_depth_seg_token(parts[0], tokenizer)) - 3
else:
round_len = len(tokenizer_seg_token(rou, tokenizer))
instruction_len = len(tokenizer_seg_token(parts[0], tokenizer)) - 2
elif has_image:
round_len = len(tokenizer_image_token(rou, tokenizer))
instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
else:
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
def vcoder_ds_preprocess_multimodal(
sources: Sequence[str],
data_args: DataArguments
) -> Dict:
is_multimodal = data_args.is_multimodal
if not is_multimodal:
return sources
for source in sources:
for sentence in source:
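            # move each special token (<image>, <seg>, <depth>) to the front of the prompt on its own line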
if DEFAULT_IMAGE_TOKEN in sentence['value']:
sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value']
sentence['value'] = sentence['value'].strip()
replace_token = DEFAULT_IMAGE_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)
if DEFAULT_SEG_TOKEN in sentence['value']:
sentence['value'] = sentence['value'].replace(DEFAULT_SEG_TOKEN, '').strip()
sentence['value'] = DEFAULT_SEG_TOKEN + '\n' + sentence['value']
sentence['value'] = sentence['value'].strip()
replace_token = DEFAULT_SEG_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_SEG_TOKEN, replace_token)
if DEFAULT_DEPTH_TOKEN in sentence['value']:
sentence['value'] = sentence['value'].replace(DEFAULT_DEPTH_TOKEN, '').strip()
sentence['value'] = DEFAULT_DEPTH_TOKEN + '\n' + sentence['value']
sentence['value'] = sentence['value'].strip()
replace_token = DEFAULT_DEPTH_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_DEPTH_TOKEN, replace_token)
return sources
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False,
has_seg: bool = False,
has_depth: bool = False
) -> Dict:
"""
    Given a list of sources, each of which is a conversation list. This transform:
    1. Add the signal '### ' at the beginning of each sentence, with the end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
"""
if conversation_lib.default_conversation.version.startswith("v1"):
return depth_seg_preprocess_v1(sources, tokenizer, has_image=has_image, has_seg=has_seg, has_depth=has_depth)
raise ValueError(f"Unknown conversation version: {conversation_lib.default_conversation.version}")
def _obtain_depth_texts(file_path):
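    # parse lines of the form "...<IMG>image_name<IMG>description" into {image_name: depth-order description}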
with open(file_path) as f:
lines = f.readlines()
depth_labels = {}
for line in lines:
key = line.split("<IMG>")[1].strip("\n")
label = line.split("<IMG>")[2].strip("\n")
depth_labels[key] = label
return depth_labels
def _obtain_seg_texts(file_path):
def _remove_specific_word(text, word_to_remove):
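        # keep only the first occurrence of word_to_remove; drop later repeats and any punctuation directly before them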
tokens = re.findall(r'\b\w+\b|[,.]', text)
result_tokens = []
word_found = False
for i, token in enumerate(tokens):
if token == word_to_remove:
if not word_found:
# Keep the first occurrence and mark it as found
result_tokens.append(token)
word_found = True
else:
# Remove any preceding punctuation if it's just before this word
if i > 0 and tokens[i-1] in {',', '.'}:
result_tokens.pop()
else:
result_tokens.append(token)
# Join tokens and clean up spaces before punctuation
result_text = ' '.join(result_tokens)
result_text = re.sub(r'\s([,.](?:\s|$))', r'\1', result_text)
return result_text
with open(file_path) as f:
lines = f.readlines()
seg_labels = {}
for line in lines:
key = line.split("<IMG>")[1].strip("\n")
label = line.split("<IMG>")[2].strip("\n")
label = _remove_specific_word(label, "wall")
label = _remove_specific_word(label, "window")
seg_labels[key] = label
return seg_labels
def obtain_seg_data_splits(data_args):
def _get_labels(folder):
return _obtain_seg_texts(os.path.join(data_args.seg_image_folder, folder, "panoptic.txt"))
list_data_dict = []
data_dict = json.load(open(data_args.data_path, "r"))
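    # attach the segmentation map name and folder that match each sample's source dataset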
for l in data_dict:
if "image" in l.keys():
if os.path.exists(os.path.join(data_args.image_folder, l["image"])):
l["seg"] = l["image"].split("/")[-1]
if "coco" in l["image"]:
l["seg_folder"] = "coco_segm_text/train/panoptic_inference"
elif "gqa" in l["image"]:
l["seg_folder"] = "gqa/seg_images/panoptic_inference"
elif "VG_100K_2" in l["image"]:
l["seg_folder"] = "vg/vg/SEG_VG_100K_2/panoptic_inference"
elif "VG_100K" in l["image"]:
l["seg_folder"] = "vg/vg/SEG_VG_100K/panoptic_inference"
elif "ocr_vqa" in l["image"]:
l["seg_folder"] = "ocr_vqa/seg_images/panoptic_inference"
if "textvqa" in l["image"]:
l["seg_folder"] = "textvqa/seg_images/panoptic_inference"
conversations = []
for c in l["conversations"]:
if "<image>" in c["value"]:
c["value"] = c["value"].replace("<image>", "<image>\n<seg>")
conversations.append(c)
l["conversations"] = conversations
if len(conversations) > 0:
list_data_dict.append(l)
labels_dict = {
"coco_segm_text/train": _get_labels("coco_segm_text/train/"),
"gqa/seg_images": _get_labels("gqa/seg_images/"),
"vg/vg/SEG_VG_100K": _get_labels("vg/vg/SEG_VG_100K/"),
"vg/vg/SEG_VG_100K_2": _get_labels("vg/vg/SEG_VG_100K_2/"),
"ocr_vqa/seg_images": _get_labels("ocr_vqa/seg_images"),
"textvqa/seg_images": _get_labels("textvqa/seg_images/"),
}
random.shuffle(list_data_dict)
list_data_dict = list_data_dict[:200000]
final_list_data_dict = []
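    # keep ~30% of the sampled conversations and splice one extra object-identification QA turn (built from the panoptic labels) into each at a random even position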
for l in list_data_dict:
prob_add = np.random.uniform(0,1.)
if prob_add > 0.7:
labels = labels_dict[l["seg_folder"].split("/panoptic_inference")[0]]
conversations = l["conversations"]
even_indices = list(range(2, len(conversations) + 1, 2))
random_even_index = random.choice(even_indices)
question_prob = np.random.uniform(0,1.)
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(PANOPTIC_QUESTIONS)
conv = [{
"from": "human",
"value": question
},
{
"from": "gpt",
"value": labels[l["seg"]]
}]
final_conversations = conversations[:random_even_index] + conv + conversations[random_even_index:]
l["conversations"] = final_conversations
final_list_data_dict.append(l)
return final_list_data_dict
def obtain_seg_depth_data_splits(data_args):
data_dict = json.load(open(data_args.data_path, "r"))
list_data_dict = []
labels = _obtain_depth_texts(os.path.join(data_args.depth_data_path, "coco_segm_text", "depth", "train", "panoptic_order.txt"))
for l in data_dict:
if "image" in l.keys():
if os.path.exists(os.path.join(data_args.image_folder, l["image"])):
if "coco" in l["image"]:
l["depth"] = l["image"].split("/")[-1]
l["seg"] = l["image"].split("/")[-1]
l["seg_folder"] = "coco_segm_text/train/panoptic_inference"
l["depth_folder"] = "coco_segm_text/depth/train/depth"
conversations = []
for c in l["conversations"]:
if "<image>" in c["value"]:
c["value"] = c["value"].replace("<image>", "<image>\n<seg>\n<depth>")
conversations.append(c)
l["conversations"] = conversations
if len(conversations) > 0:
list_data_dict.append(l)
random.shuffle(list_data_dict)
list_data_dict = list_data_dict[:100000]
final_list_data_dict = []
for l in list_data_dict:
prob_add = np.random.uniform(0,1.)
if prob_add > 0.7:
conversations = l["conversations"]
even_indices = list(range(2, len(conversations) + 1, 2))
random_even_index = random.choice(even_indices)
conv = [{
"from": "human",
"value": random.choice(DEPTH_QUESTIONS)
},
{
"from": "gpt",
"value": labels[l["seg"]]
}]
final_conversations = conversations[:random_even_index] + conv + conversations[random_even_index:]
l["conversations"] = final_conversations
final_list_data_dict.append(l)
return final_list_data_dict
def get_object_data_depth_split(data_args):
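    # build object-identification QA samples over COCO images, each paired with its panoptic segmentation and depth map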
list_data_dict = []
for bucket in ["train", "unlabeled", "test"]:
panoptic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "panoptic.txt"))
for key in panoptic_labels.keys():
question_prob = np.random.uniform(0,1.)
answer = panoptic_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(PANOPTIC_QUESTIONS)
seg_folder = "panoptic_inference"
question += "\n<image>\n<seg>\n<depth>"
conversations = [
{
"from": "human",
"value": question
},
{
"from": "gpt",
"value": answer
},
]
list_data_dict.append(
{
"conversations": conversations,
"image": "coco/" + bucket + "2017/" + key,
"seg": key,
"depth": key,
"seg_folder": "coco_segm_text/" + bucket + "/" + seg_folder,
"depth_folder": "coco_segm_text/depth/" + bucket + "/" + "depth"
}
)
random.shuffle(list_data_dict)
return list_data_dict[:50000]
def get_object_data_split(data_args):
list_data_dict = []
for bucket in ["train", "unlabeled", "test"]:
panoptic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "panoptic.txt"))
semantic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "semantic.txt"))
instance_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "instance.txt"))
for key in panoptic_labels.keys():
assert key in semantic_labels.keys() and key in instance_labels.keys(), "Instance, semantic, and panoptic labels should have the same keys."
prob_task = np.random.uniform(0,1.)
question_prob = np.random.uniform(0,1.)
if prob_task < 0.33:
answer = semantic_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(SEMANTIC_QUESTIONS)
seg_folder = "semantic_inference"
elif prob_task < 0.66:
answer = instance_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(INSTANCE_QUESTIONS)
seg_folder = "instance_inference"
else:
answer = panoptic_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(PANOPTIC_QUESTIONS)
seg_folder = "panoptic_inference"
question += "\n<image>\n<seg>"
conversations = [
{
"from": "human",
"value": question
},
{
"from": "gpt",
"value": answer
},
]
list_data_dict.append(
{
"conversations": conversations,
"image": "coco/" + bucket + "2017/" + key,
"seg": key,
"seg_folder": "coco_segm_text/" + bucket + "/" + seg_folder
}
)
random.shuffle(list_data_dict)
return list_data_dict
def get_depth_data_split(data_args):
list_data_dict = []
for bucket in ["train", "unlabeled", "test"]:
labels = _obtain_depth_texts(os.path.join(data_args.depth_data_path, "coco_segm_text", "depth", bucket, "panoptic_order.txt"))
for key in labels.keys():
answer = labels[key]
question = random.choice(DEPTH_QUESTIONS)
question += "\n<image>\n<seg>\n<depth>"
seg_folder = "panoptic_inference"
conversations = [
{
"from": "human",
"value": question
},
{
"from": "gpt",
"value": answer
},
]
list_data_dict.append(
{
"conversations": conversations,
"image": "coco/" + bucket + "2017/" + key,
"seg": key,
"depth": key,
"seg_folder": "coco_segm_text/" + bucket + "/" + seg_folder,
"depth_folder": "coco_segm_text/depth/" + bucket + "/" + "depth"
}
)
random.shuffle(list_data_dict)
return list_data_dict
def get_extra_count_data_split(data_args):
list_data_dict = []
bucket = "train"
panoptic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "panoptic.txt"))
for key in panoptic_labels.keys():
prob = np.random.uniform(0,1.)
if prob > 0.99:
answer = panoptic_labels[key]
seg_folder = "panoptic_inference"
qa_pairs = generate_qa_pairs(answer)
if len(qa_pairs) >= 1:
conversations = []
for idx, qa_pair in enumerate(qa_pairs):
conversations.append(
{
"from": "human",
"value": qa_pair[0] + "\n<image>\n<seg>" if idx == 0 else qa_pair[0]
}
)
conversations.append(
{
"from": "gpt",
"value": qa_pair[1]
}
)
list_data_dict.append(
{
"conversations": conversations,
"image": "coco/" + bucket + "2017/" + key,
"seg": key,
"seg_folder": "coco_segm_text/" + bucket + "/" + seg_folder
}
)
random.shuffle(list_data_dict)
return list_data_dict
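# The lazy dataset below mixes the splits built above (VQA-style images with spliced
# segmentation QA, COCO seg+depth conversations, object naming with and without depth,
# depth-order QA, and counting QA), shuffles them once, and defers all image / seg /
# depth loading and preprocessing to __getitem__.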
class LazyDepthSegSupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, tokenizer: transformers.PreTrainedTokenizer,
data_args: DataArguments):
super(LazyDepthSegSupervisedDataset, self).__init__()
list_data_dict = []
if data_args.data_path is not None:
print("Preparing dataset, this may take upto 5 minutes...")
seg_data_list = obtain_seg_data_splits(data_args)
list_data_dict.extend(seg_data_list)
depth_data_list = obtain_seg_depth_data_splits(data_args)
list_data_dict.extend(depth_data_list)
depth_object_list = get_object_data_depth_split(data_args)
list_data_dict.extend(depth_object_list)
object_data_list = get_object_data_split(data_args)
list_data_dict.extend(object_data_list)
depth_order_list = get_depth_data_split(data_args)
list_data_dict.extend(depth_order_list)
extra_object_list = get_extra_count_data_split(data_args)
list_data_dict.extend(extra_object_list)
rank0_print("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
random.shuffle(list_data_dict)
self.list_data_dict = list_data_dict
self.data_args = data_args
def __len__(self):
return len(self.list_data_dict)
@property
def lengths(self):
length_list = []
for sample in self.list_data_dict:
seg_tokens = 128 if 'seg' in sample else 0
img_tokens = 128 if 'image' in sample else 0
depth_tokens = 128 if 'depth' in sample else 0
length_list.append(sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens + seg_tokens + depth_tokens)
return length_list
@property
def modality_lengths(self):
length_list = []
for sample in self.list_data_dict:
cur_len = sum(len(conv['value'].split()) for conv in sample['conversations'])
cur_len = cur_len if 'image' in sample else -cur_len
cur_len = cur_len if 'seg' in sample else -cur_len
length_list.append(cur_len)
return length_list
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
sources = self.list_data_dict[i]
if isinstance(i, int):
sources = [sources]
assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
if 'image' in sources[0]:
image_file = self.list_data_dict[i]['image']
image_folder = self.data_args.image_folder
processor = self.data_args.image_processor
image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
seg_file = self.list_data_dict[i]['seg']
seg_folder = self.data_args.seg_image_folder
seg = Image.open(os.path.join(seg_folder, self.list_data_dict[i]['seg_folder'], seg_file)).convert('RGB')
seg_processor = self.data_args.seg_image_processor
if 'depth' in sources[0]:
depth_file = self.list_data_dict[i]['depth']
depth_folder = self.data_args.depth_data_path
depth = Image.open(os.path.join(depth_folder, self.list_data_dict[i]['depth_folder'], depth_file)).convert('RGB')
depth_processor = self.data_args.depth_image_processor
else:
depth = None
if self.data_args.image_aspect_ratio == 'pad':
def expand2square(pil_img, background_color):
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
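                # Pad the image (and, below, the seg / depth maps) to a square using the
                # processor's mean color, so the padded border becomes roughly zero after
                # the processor's normalization step.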
image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
seg = expand2square(seg, tuple(int(x*255) for x in seg_processor.image_mean))
seg = seg_processor.preprocess(seg, return_tensors='pt')['pixel_values'][0]
if depth is not None:
depth = expand2square(depth, tuple(int(x*255) for x in depth_processor.image_mean))
depth = depth_processor.preprocess(depth, return_tensors='pt')['pixel_values'][0]
else:
image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
seg = seg_processor.preprocess(seg, return_tensors='pt')['pixel_values'][0]
if depth is not None:
depth = depth_processor.preprocess(depth, return_tensors='pt')['pixel_values'][0]
sources = vcoder_ds_preprocess_multimodal(
copy.deepcopy([e["conversations"] for e in sources]),
self.data_args)
else:
sources = copy.deepcopy([e["conversations"] for e in sources])
data_dict = preprocess(
sources,
self.tokenizer,
has_image=('image' in self.list_data_dict[i]),
has_seg=('seg' in self.list_data_dict[i]),
has_depth=('depth' in self.list_data_dict[i])
)
if isinstance(i, int):
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
if 'image' in self.list_data_dict[i]:
data_dict['image'] = image
elif self.data_args.is_multimodal:
crop_size = self.data_args.image_processor.crop_size
data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
if 'seg' in self.list_data_dict[i]:
data_dict['seg'] = seg
elif self.data_args.is_multimodal:
crop_size = self.data_args.seg_image_processor.crop_size
data_dict['seg'] = torch.zeros(3, crop_size['height'], crop_size['width'])
if 'depth' in self.list_data_dict[i]:
data_dict['depth'] = depth
elif self.data_args.is_multimodal:
crop_size = self.data_args.depth_image_processor.crop_size
data_dict['depth'] = torch.zeros(3, crop_size['height'], crop_size['width'])
return data_dict
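# Each item is a dict with `input_ids` / `labels`, plus `image`, `seg` and `depth`
# tensors when the sample provides them; text-only samples in a multimodal run receive
# all-zero placeholder tensors of the corresponding processor's crop size so batches
# keep a uniform structure.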
@dataclass
class DataCollatorForDepthSegSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances]
for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
input_ids = input_ids[:, :self.tokenizer.model_max_length]
labels = labels[:, :self.tokenizer.model_max_length]
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
if 'image' in instances[0]:
images = [instance['image'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in images):
batch['images'] = torch.stack(images)
else:
batch['images'] = images
if 'seg' in instances[0]:
segs = [instance['seg'] for instance in instances]
if all(x is not None and x.shape == segs[0].shape for x in segs):
batch['segs'] = torch.stack(segs)
else:
batch['segs'] = segs
if 'depth' in instances[0]:
depths = [instance['depth'] for instance in instances]
if all(x is not None and x.shape == depths[0].shape for x in depths):
batch['depths'] = torch.stack(depths)
else:
batch['depths'] = depths
return batch
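# Collation contract: `input_ids` are right-padded with the tokenizer's pad token and
# `labels` with IGNORE_INDEX, both truncated to `model_max_length`; the attention mask
# is recovered as `input_ids != pad_token_id`. Image / seg / depth tensors are stacked
# into one batch tensor only when every sample shares the same shape, otherwise they
# are passed through as a list.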
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
train_dataset = LazyDepthSegSupervisedDataset(tokenizer=tokenizer,
data_args=data_args)
data_collator = DataCollatorForDepthSegSupervisedDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
def vcoder_ds_train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
local_rank = training_args.local_rank
compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
bnb_model_from_pretrained_args = {}
if training_args.bits in [4, 8]:
bnb_model_from_pretrained_args.update(dict(
device_map={"": training_args.device},
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
quantization_config=BitsAndBytesConfig(
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=training_args.double_quant,
bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'}
)
))
if model_args.depth_tune_adapter is not None:
if 'mpt' in model_args.model_name_or_path:
raise ValueError("MPT is not supported for VCoder Adapted Training.")
else:
model = VCoderDSLlavaLlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
else:
model = transformers.LlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
model.config.use_cache = False
if model_args.freeze_backbone:
model.model.requires_grad_(False)
if training_args.bits in [4, 8]:
model.config.torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)
if training_args.gradient_checkpointing:
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
if training_args.lora_enable:
lora_config = LoraConfig(
r=training_args.lora_r,
lora_alpha=training_args.lora_alpha,
target_modules=find_all_linear_names(model),
lora_dropout=training_args.lora_dropout,
bias=training_args.lora_bias,
task_type="CAUSAL_LM",
)
if training_args.bits == 16:
if training_args.bf16:
model.to(torch.bfloat16)
if training_args.fp16:
model.to(torch.float16)
rank0_print("Adding LoRA adapters...")
model = get_peft_model(model, lora_config)
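        # After wrapping, only the injected low-rank adapter weights remain trainable by
        # default; the linear layers selected via find_all_linear_names keep their
        # original weights frozen.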
if 'mpt' in model_args.model_name_or_path:
raise ValueError("MPT is not supported for VCoder Adapted Training.")
else:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
tokenizer.pad_token = tokenizer.unk_token
if model_args.version in conversation_lib.conv_templates:
conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
else:
conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"]
# vision modules
model.get_vision_tower().load_model()
data_args.image_processor = model.get_vision_tower().image_processor
data_args.is_multimodal = True
vision_tower = model.get_vision_tower()
vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)
model.config.image_aspect_ratio = data_args.image_aspect_ratio
model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
if model_args.tune_mm_mlp_adapter:
model.requires_grad_(False)
for p in model.get_model().mm_projector.parameters():
p.requires_grad = True
model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
if training_args.freeze_mm_mlp_adapter:
for p in model.get_model().mm_projector.parameters():
p.requires_grad = False
if training_args.bits in [4, 8]:
model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device)
if model_args.seg_tune_adapter is not None:
model.get_model().initialize_seg_modules(
model_args=model_args,
fsdp=training_args.fsdp
)
if model_args.seg_tune_adapter:
if getattr(model_args, "freeze_llm", False):
model.requires_grad_(False)
for p in model.get_model().seg_mm_projector.parameters():
p.requires_grad = True
for p in model.get_model().vcoder_lm_emb.parameters():
p.requires_grad = True
data_args.seg_image_processor = model.get_vision_tower().image_processor
model.config.use_mm2_proj = model_args.use_mm2_proj
model.config.mm_vcoder_lm_emb = True
model.config.seg_tune_adapter = training_args.seg_tune_adapter = model_args.seg_tune_adapter
model.config.freeze_seg_mm_mlp_adapter = training_args.freeze_seg_mm_mlp_adapter
if training_args.freeze_seg_mm_mlp_adapter:
for p in model.get_model().seg_mm_projector.parameters():
p.requires_grad = False
if model_args.use_mm2_proj:
for p in model.get_model().mm2_projector.parameters():
p.requires_grad = False
if training_args.bits in [4, 8]:
model.get_model().seg_mm_projector.to(dtype=compute_dtype, device=training_args.device)
else:
# seg modules
data_args.seg_image_processor = model.get_vision_tower().image_processor
if training_args.bits in [4, 8]:
model.get_model().seg_mm_projector.to(dtype=compute_dtype, device=training_args.device)
if model_args.depth_tune_adapter is not None:
model.get_model().initialize_depth_modules(
model_args=model_args,
fsdp=training_args.fsdp
)
if getattr(model_args, "freeze_llm", False):
model.requires_grad_(False)
for p in model.get_model().depth_mm_projector.parameters():
p.requires_grad = True
for p in model.get_model().vcoder_lm_emb.parameters():
p.requires_grad = True
if model_args.seg_tune_adapter:
for p in model.get_model().seg_mm_projector.parameters():
p.requires_grad = True
data_args.depth_image_processor = model.get_vision_tower().image_processor
model.config.depth_tune_adapter = training_args.depth_tune_adapter = model_args.depth_tune_adapter
model.config.freeze_depth_mm_mlp_adapter = training_args.freeze_depth_mm_mlp_adapter
if training_args.freeze_depth_mm_mlp_adapter:
for p in model.get_model().depth_mm_projector.parameters():
p.requires_grad = False
if training_args.bits in [4, 8]:
model.get_model().depth_mm_projector.to(dtype=compute_dtype, device=training_args.device)
if training_args.bits in [4, 8]:
for name, module in model.named_modules():
if isinstance(module, LoraLayer):
if training_args.bf16:
module = module.to(torch.bfloat16)
if 'norm' in name:
module = module.to(torch.float32)
if 'lm_head' in name or 'embed_tokens' in name:
if hasattr(module, 'weight'):
if training_args.bf16 and module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
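    # The loop above normalizes module dtypes for 4/8-bit runs: LoRA layers are cast to
    # bf16 when bf16 training is enabled, norm layers are cast to fp32 (commonly done for
    # numerical stability), and lm_head / embed_tokens weights are downcast to bf16 only
    # if they are still fp32, a typical QLoRA-style recipe.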
data_module = make_supervised_data_module(tokenizer=tokenizer,
data_args=data_args) | trainer = VCoderDSLLaVATrainer(model=model, | 4 | 2023-12-17 07:46:27+00:00 | 12k |
DeepWok/mase | machop/chop/models/manual/opt_lora/modeling_opt_lora.py | [
{
"identifier": "LoraLayer",
"path": "machop/chop/models/manual/lora_modules.py",
"snippet": "class LoraLayer:\n def __init__(self, in_features: int, out_features: int, **kwargs):\n self.r = {}\n self.lora_alpha = {}\n self.scaling = {}\n self.lora_dropout = nn.ModuleDict({})\n self.lora_A = nn.ModuleDict({})\n self.lora_B = nn.ModuleDict({})\n # For Embedding layer\n self.lora_embedding_A = nn.ParameterDict({})\n self.lora_embedding_B = nn.ParameterDict({})\n # Mark the weight as unmerged\n self.merged = False\n self.disable_adapter = False\n self.in_features = in_features\n self.out_features = out_features\n self.kwargs = kwargs\n init_lora_weights = bool(field(default=True))\n\n def update_layer(\n self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights\n ):\n self.r[adapter_name] = r\n self.lora_alpha[adapter_name] = lora_alpha\n if lora_dropout > 0.0:\n lora_dropout_layer = nn.Dropout(p=lora_dropout)\n else:\n lora_dropout_layer = nn.Identity()\n\n self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))\n # Actual trainable parameters\n if self.disable_adapter == False:\n if r > 0:\n self.lora_A.update(\n nn.ModuleDict(\n {adapter_name: nn.Linear(self.in_features, r, bias=False)}\n )\n )\n self.lora_B.update(\n nn.ModuleDict(\n {adapter_name: nn.Linear(r, self.out_features, bias=False)}\n )\n )\n self.scaling[adapter_name] = lora_alpha / r\n else:\n pass\n\n if init_lora_weights:\n self.reset_lora_parameters(adapter_name)\n self.to(self.weight.device)\n\n def reset_lora_parameters(self, adapter_name):\n if adapter_name in self.lora_A.keys():\n # initialize A the same way as the default for nn.Linear and B to zero\n nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_B[adapter_name].weight)\n if adapter_name in self.lora_embedding_A.keys():\n # initialize a the same way as the default for nn.linear and b to zero\n nn.init.zeros_(self.lora_embedding_A[adapter_name])\n nn.init.normal_(self.lora_embedding_B[adapter_name])"
},
{
"identifier": "LinearLora",
"path": "machop/chop/models/manual/lora_modules.py",
"snippet": "class LinearLora(nn.Linear, LoraLayer):\n # Lora implemented in a dense layer\n def __init__(\n self,\n in_features: int,\n out_features: int,\n config: dict = None,\n **kwargs,\n ):\n self.config = config\n init_lora_weights = self.config.get(\"init_lora_weights\", True)\n\n r, lora_alpha, lora_dropout, adapter_name, disable_adapter = (\n config[\"r\"],\n config[\"lora_alpha\"],\n config[\"lora_dropout\"],\n config[\"adapter_name\"],\n config[\"disable_adapter\"],\n )\n lora_dropout = float(lora_dropout)\n\n nn.Linear.__init__(self, in_features, out_features, **kwargs)\n LoraLayer.__init__(self, in_features=in_features, out_features=out_features)\n # Freezing the pre-trained weight matrix\n self.weight.requires_grad = False\n self.disable_adapter = disable_adapter\n self.fan_in_fan_out = config.get(\"fan_in_fan_out\", False)\n self.is_target_conv_1d_layer = config.get(\"is_target_conv_1d_layer\", False)\n\n if self.fan_in_fan_out:\n self.weight.data = self.weight.data.T\n\n nn.Linear.reset_parameters(self)\n self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)\n self.active_adapter = adapter_name\n self.is_target_conv_1d_layer = self.is_target_conv_1d_layer\n\n def merge(self):\n if self.active_adapter not in self.lora_A.keys():\n return\n if self.merged:\n warnings.warn(\"Already merged. Nothing to do.\")\n return\n if self.r[self.active_adapter] > 0:\n self.weight.data += self.get_delta_weight(self.active_adapter)\n self.merged = True\n\n def unmerge(self):\n if self.active_adapter not in self.lora_A.keys():\n return\n if not self.merged:\n warnings.warn(\"Already unmerged. Nothing to do.\")\n return\n if self.r[self.active_adapter] > 0:\n self.weight.data -= self.get_delta_weight(self.active_adapter)\n self.merged = False\n\n def get_delta_weight(self, adapter):\n return (\n transpose(\n self.lora_B[adapter].weight @ self.lora_A[adapter].weight,\n self.fan_in_fan_out,\n )\n * self.scaling[adapter]\n )\n\n def _linear(self, input: torch.Tensor) -> torch.Tensor:\n return F.linear(\n input, transpose(self.weight, self.fan_in_fan_out), bias=self.bias\n )\n\n def forward(self, x: torch.Tensor):\n previous_dtype = x.dtype\n\n if self.active_adapter not in self.lora_A.keys():\n return self._linear(x)\n\n if self.disable_adapter:\n if self.r[self.active_adapter] > 0 and self.merged:\n self.unmerge()\n result = self._linear(x)\n\n elif self.r[self.active_adapter] == 0 or self.merged:\n result = self._linear(x)\n\n else:\n lora_A = self.lora_A[self.active_adapter]\n lora_B = self.lora_B[self.active_adapter]\n dropout = self.lora_dropout[self.active_adapter]\n scaling = self.scaling[self.active_adapter]\n\n result = self._linear(x)\n x = x.to(lora_A.weight.dtype)\n result += lora_B(lora_A(dropout(x))) * scaling\n\n result = result.to(previous_dtype)\n\n return result\n\n def extract_lora_params(self):\n lora_params = {\n \"lora_A\": self.lora_A[self.active_adapter].state_dict(),\n \"lora_B\": self.lora_B[self.active_adapter].state_dict(),\n }\n\n return lora_params\n\n # Helper function to bias the training towards either the target module or the entire model"
},
{
"identifier": "OPTLoraConfig",
"path": "machop/chop/models/manual/opt_lora/configuration_opt_lora.py",
"snippet": "class OPTLoraConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate a OPT model\n according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the OPT\n [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50272):\n Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`OPTModel`]\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of decoder layers.\n ffn_dim (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in decoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer decoder.\n activation_function (`str` or `function`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n do_layer_norm_before (`bool`, *optional*, defaults to `True`):\n Whether to perform layer normalization before the attention block.\n word_embed_proj_dim (`int`, *optional*):\n `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to\n `hidden_size`.\n dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n layerdrop: (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability. 
See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more\n details.\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n enable_bias (`bool`, *optional*, defaults to `True`):\n Whether or not if the linear layers in the attention blocks should use the bias term.\n layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`):\n Whether or not if the layer norms should have learnable parameters.\n\n Example:\n\n ```python\n >>> from transformers import OPTConfig, OPTModel\n\n >>> # Initializing a OPT facebook/opt-large style configuration\n >>> configuration = OPTConfig()\n\n >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration\n >>> model = OPTModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"opt\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=50272,\n hidden_size=768,\n num_hidden_layers=12,\n ffn_dim=3072,\n max_position_embeddings=2048,\n do_layer_norm_before=True,\n _remove_final_layer_norm=False,\n word_embed_proj_dim=None,\n dropout=0.1,\n attention_dropout=0.0,\n num_attention_heads=12,\n activation_function=\"relu\",\n layerdrop=0.0,\n init_std=0.02,\n use_cache=False,\n pad_token_id=1,\n bos_token_id=2,\n eos_token_id=2,\n enable_bias=True,\n layer_norm_elementwise_affine=True,\n lora_config: dict = None,\n **kwargs,\n ):\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n **kwargs,\n )\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.num_attention_heads = num_attention_heads\n self.word_embed_proj_dim = (\n word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size\n )\n self.ffn_dim = ffn_dim\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_function = activation_function\n self.init_std = init_std\n self.layerdrop = layerdrop\n self.use_cache = use_cache\n self.do_layer_norm_before = do_layer_norm_before\n # We keep these variables at `True` for backward compatibility.\n self.enable_bias = enable_bias\n self.layer_norm_elementwise_affine = layer_norm_elementwise_affine\n if lora_config is not None:\n lora_config = parse_opt_lora_config(lora_config, num_hidden_layers)\n self.lora_config = lora_config\n\n # Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility\n # with checkpoints that have been fine-tuned before transformers v4.20.1\n # see https://github.com/facebookresearch/metaseq/pull/164\n self._remove_final_layer_norm = _remove_final_layer_norm\n\n def __setattr__(self, key, value):\n if key == \"lora_config\" and value is not None:\n value = parse_opt_lora_config(\n config=value, num_hidden_layers=self.num_hidden_layers\n )\n return super().__setattr__(key, value)"
},
{
"identifier": "OPTAttention_attention_get_dtype_min",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attention_get_dtype_min(attn_weights: Tensor) -> Tensor:\n return torch.tensor(torch.finfo(attn_weights.dtype).min)"
},
{
"identifier": "OPTAttention_attention_mask_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attention_mask_shape_check(\n attention_mask: Tensor, bsz: int, tgt_len: int, src_len: int\n) -> bool:\n if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n )"
},
{
"identifier": "OPTAttention_attn_output_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attn_output_shape_check(\n attn_output: Tensor, bsz: int, num_heads: int, tgt_len: int, head_dim: int\n) -> bool:\n if attn_output.size() != (bsz * num_heads, tgt_len, head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, num_heads, tgt_len, head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )"
},
{
"identifier": "OPTAttention_attn_weight_dtype_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attn_weight_dtype_check(attn_weights: Tensor) -> bool:\n assert attn_weights.dtype != torch.float16, \"FP16 is not supported for OPTAttention\""
},
{
"identifier": "OPTAttention_attn_weights_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attn_weights_shape_check(\n attn_weights: Tensor, bsz: int, num_heads: int, tgt_len: int, src_len: int\n) -> bool:\n if attn_weights.size() != (bsz * num_heads, tgt_len, src_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz * num_heads, tgt_len, src_len)}, but is\"\n f\" {attn_weights.size()}\"\n )"
},
{
"identifier": "OPTAttention_layer_head_mask_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_layer_head_mask_shape_check(\n layer_head_mask: Tensor, num_heads: int\n) -> bool:\n if layer_head_mask.size() != (num_heads,):\n raise ValueError(\n f\"Head mask for a single layer should be of size {(num_heads,)}, but is\"\n f\" {layer_head_mask.size()}\"\n )"
},
{
"identifier": "OPTAttention_reshape_qkv_back_for_bmm",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_reshape_qkv_back_for_bmm(\n query_states: Tensor,\n key_states: Tensor,\n value_states: Tensor,\n proj_shape: int,\n tgt_len: int,\n bsz: int,\n num_heads: int,\n head_dim: int,\n) -> Tuple[Tensor]:\n query_states = OPTAttention_self_shape(\n query_states, tgt_len, bsz, num_heads, head_dim\n ).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n return query_states, key_states, value_states"
},
{
"identifier": "OPTAttention_self_shape",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_self_shape(\n tensor: Tensor, seq_len: int, bsz: int, num_heads: int, head_dim: int\n) -> Tensor:\n \"\"\"\n reshape and permute the Tensor for matmul\n [B, N, h*d_head] -> [B, N, h, d_head] -> [B, h, N, d_head]\n\n replaces `OPTAttention._shape` method\n \"\"\"\n return tensor.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).contiguous()"
},
{
"identifier": "OPTDecoder_check_head_mask",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTDecoder_check_head_mask(head_mask, decoder_layers) -> bool:\n for attn_mask, mask_name in zip([head_mask], [\"head_mask\"]):\n if attn_mask is not None:\n if attn_mask.size()[0] != (len(decoder_layers)):\n raise ValueError(\n f\"The `{mask_name}` should be specified for {len(decoder_layers)} layers, but it is for\"\n f\" {head_mask.size()[0]}.\"\n )"
},
{
"identifier": "OPTDecoder_self_prepare_decoder_attention",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTDecoder_self_prepare_decoder_attention(\n attention_mask: Tensor,\n input_shape,\n inputs_embeds: Tensor,\n past_key_values_length: int,\n) -> Tensor:\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(\n input_shape,\n inputs_embeds.dtype,\n past_key_values_length=past_key_values_length,\n ).to(inputs_embeds.device)\n\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n expanded_attn_mask = _expand_mask(\n attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]\n ).to(inputs_embeds.device)\n combined_attention_mask = (\n expanded_attn_mask\n if combined_attention_mask is None\n else expanded_attn_mask + combined_attention_mask\n )\n\n return combined_attention_mask"
},
{
"identifier": "OPTForCasualLM_compute_loss",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTForCasualLM_compute_loss(logits, labels, self_config_vocab_size):\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss = torch.nn.functional.cross_entropy(\n shift_logits.view(-1, self_config_vocab_size), shift_labels.view(-1)\n )\n # loss = self_loss_fct(\n # shift_logits.view(-1, self_config_vocab_size), shift_labels.view(-1)\n # )\n return loss"
}
] | import random
import torch
import torch.utils.checkpoint
from typing import Optional, Tuple, Union
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging, replace_return_docstrings
from ..lora_modules import LoraLayer, LinearLora
from .configuration_opt_lora import OPTLoraConfig
from .utils_opt import (
OPTAttention_attention_get_dtype_min,
OPTAttention_attention_mask_shape_check,
OPTAttention_attn_output_shape_check,
OPTAttention_attn_weight_dtype_check,
OPTAttention_attn_weights_shape_check,
OPTAttention_layer_head_mask_shape_check,
OPTAttention_reshape_qkv_back_for_bmm,
OPTAttention_self_shape,
OPTDecoder_check_head_mask,
OPTDecoder_self_prepare_decoder_attention,
OPTForCasualLM_compute_loss,
) | 7,224 | self.lm_head = nn.Linear(
config.word_embed_proj_dim, config.vocab_size, bias=False
)
# self.loss_fct = CrossEntropyLoss()
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
)
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: torch.Tensor = None,
labels: torch.LongTensor = None,
head_mask: Optional[torch.Tensor] = None,
# inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, OPTForCausalLM
>>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
return_dict = self.config.return_dict if return_dict is None else return_dict
output_attentions = (
self.config.output_attentions
if output_attentions is None
else output_attentions
)
output_hidden_states = (
self.config.output_hidden_states
if output_hidden_states is None
else output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
# inputs_embeds=inputs_embeds,
)
logits = self.lm_head(outputs[0]).contiguous()
loss = None
if labels is not None:
# # Shift so that tokens < n predict n
| # coding=utf-8
# ----------------------------------------------
# This is a traceable version of OPTModel and OPTForCausalLanguageModeling
# modified code based on HuggingFace's opt
# ----------------------------------------------
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OPT model."""
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "OPTLoraConfig"
# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/opt-125m",
"facebook/opt-350m",
"facebook/opt-1.3b",
"facebook/opt-2.7b",
"facebook/opt-6.7b",
"facebook/opt-13b",
"facebook/opt-30b",
# See all OPT models at https://huggingface.co/models?filter=opt
]
class OPTLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
        # OPT is set up so that if padding_idx is specified, the embedding ids are offset by 2
        # and num_embeddings is adjusted accordingly. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, attention_mask: torch.LongTensor, past_key_values_length: int = 0
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
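# A minimal sketch (not part of the original file; the helper name is ours) of the cumsum
# trick used above, assuming a 0/1 attention mask: padded positions collapse to -1, and
# forward() then adds self.offset (2) before the embedding lookup.
def _example_opt_position_ids(attention_mask: torch.Tensor) -> torch.Tensor:
    # e.g. tensor([[0, 1, 1, 1]]) (left padding) -> tensor([[-1, 0, 1, 2]])
    mask = attention_mask.long()
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() - 1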
class OPTAttention(nn.Module):
"""
- FX-traceable Multi-headed attention from 'Attention Is All You Need' paper
- This module includes multi-head (k, q, v linear, attention), concat, and attention output linear
- To make this module traceable, `mode` must be one of integer 0, 1, 2, or 3.
- The default mode `None` (un-traceable mode) can be used for training (testing), but not for modify-sw.
"""
custom_node_leaf_patch = [
("embeddings", "BertEmbeddingsPatched", OPTLearnedPositionalEmbedding)
]
def __init__(
self,
config: OPTLoraConfig,
embed_dim: int,
num_heads: int,
layer_id: int = 0,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = False,
):
super().__init__()
self.config = config
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
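        # The LoRA settings for this attention block are read from
        # config.lora_config[f"model_layer_{layer_id}"]["self_attn"]; each projection
        # below is a LinearLora, i.e. a frozen nn.Linear plus trainable low-rank A/B
        # matrices scaled by lora_alpha / r.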
lora_config = config.lora_config[f"model_layer_{layer_id}"]["self_attn"]
self.k_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["k_proj"],
)
self.v_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["v_proj"],
)
self.q_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["q_proj"],
)
self.o_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["o_proj"],
)
self.lora_config = lora_config
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
# key_value_states: Optional[torch.Tensor] = None,
# past_key_value: Optional[Tuple[torch.Tensor]] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, _ = hidden_states.shape
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# self_attention
# key_value_states is None, past_key_value is None
key_states = OPTAttention_self_shape(
self.k_proj(hidden_states),
seq_len=-1,
bsz=bsz,
num_heads=self.num_heads,
head_dim=self.head_dim,
)
value_states = OPTAttention_self_shape(
self.v_proj(hidden_states),
seq_len=-1,
bsz=bsz,
num_heads=self.num_heads,
head_dim=self.head_dim,
)
# proj_shape = OPTAttention_construct_proj_shape(
# bsz, self.num_heads, self.head_dim
# )
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states, key_states, value_states = OPTAttention_reshape_qkv_back_for_bmm(
query_states,
key_states,
value_states,
proj_shape=proj_shape,
tgt_len=tgt_len,
bsz=bsz,
num_heads=self.num_heads,
head_dim=self.head_dim,
)
src_len = key_states.shape[1]
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
OPTAttention_attn_weights_shape_check(
attn_weights, bsz, self.num_heads, tgt_len, src_len
)
if attention_mask is not None:
OPTAttention_attention_mask_shape_check(
attention_mask, bsz, tgt_len, src_len
)
attn_weights = (
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attention_mask
)
attn_weights = torch.max(
attn_weights, OPTAttention_attention_get_dtype_min(attn_weights)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        # Patched OPTAttention does not support FP16: instead of upcasting fp16 weights to fp32
        # (see https://github.com/huggingface/transformers/pull/17437), the check below rejects fp16 outright.
OPTAttention_attn_weight_dtype_check(attn_weights)
# *: Currently this model does not support torch.float16
# if attn_weights.dtype == torch.float16:
# attn_weights = nn.functional.softmax(
# attn_weights, dim=-1, dtype=torch.float32
# ).to(torch.float16)
# else:
# attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
OPTAttention_layer_head_mask_shape_check(layer_head_mask, self.num_heads)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights_reshaped.view(
bsz * self.num_heads, tgt_len, src_len
)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
)
attn_output = torch.bmm(attn_probs, value_states)
OPTAttention_attn_output_shape_check(
attn_output, bsz, self.num_heads, tgt_len, self.head_dim
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights_reshaped
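# Minimal shape sketch (illustrative only, not part of the original model; the helper name
# is ours): the reshape helpers used above fold the head dimension into the batch
# dimension so that a single torch.bmm covers every head at once.
def _example_fold_heads(x: torch.Tensor, num_heads: int) -> torch.Tensor:
    # [bsz, seq_len, num_heads * head_dim] -> [bsz * num_heads, seq_len, head_dim]
    bsz, seq_len, embed_dim = x.shape
    head_dim = embed_dim // num_heads
    x = x.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).contiguous()
    return x.view(bsz * num_heads, seq_len, head_dim)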
class OPTDecoderLayer(nn.Module):
def __init__(self, config: OPTLoraConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = OPTAttention(
config=config,
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
bias=config.enable_bias,
)
self.do_layer_norm_before = config.do_layer_norm_before
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.self_attn_layer_norm = nn.LayerNorm(
self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine
)
self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=config.enable_bias)
self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=config.enable_bias)
self.final_layer_norm = nn.LayerNorm(
self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
if self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# *: key_value_states is always None
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
# past_key_value=None,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
# key_value_states=None,
)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = residual + hidden_states
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Fully Connected
hidden_states_shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, hidden_states.shape[-1])
residual = hidden_states
        # 125m, 1.7B, ..., 175B applies layer norm BEFORE the feed-forward block
if self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = (residual + hidden_states).view(hidden_states_shape)
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
class OPTPreTrainedModel(PreTrainedModel):
config_class = OPTLoraConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["OPTDecoderLayer"]
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, OPTDecoder):
module.gradient_checkpointing = value
class OPTDecoder(OPTPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`]
Args:
config: OPTConfig
"""
custom_node_leaf_patch = [
(
"embed_positions",
"OPTLearnedPositionalEmbedding",
OPTLearnedPositionalEmbedding,
)
]
def __init__(self, config: OPTLoraConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(
config.vocab_size, config.word_embed_proj_dim, self.padding_idx
)
self.embed_positions = OPTLearnedPositionalEmbedding(
config.max_position_embeddings, config.hidden_size
)
if config.word_embed_proj_dim != config.hidden_size:
self.project_out = nn.Linear(
config.hidden_size, config.word_embed_proj_dim, bias=False
)
else:
self.project_out = None
if config.word_embed_proj_dim != config.hidden_size:
self.project_in = nn.Linear(
config.word_embed_proj_dim, config.hidden_size, bias=False
)
else:
self.project_in = None
# Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
if config.do_layer_norm_before and not config._remove_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(
config.hidden_size,
elementwise_affine=config.layer_norm_elementwise_affine,
)
else:
self.final_layer_norm = None
self.layers = nn.ModuleList(
[OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: torch.Tensor = None,
head_mask: Optional[torch.Tensor] = None,
# inputs_embeds: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = True,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
"""
return_dict = self.config.return_dict if return_dict is None else return_dict
output_attentions = (
self.config.output_attentions
if output_attentions is None
else output_attentions
)
output_hidden_states = (
self.config.output_hidden_states
if output_hidden_states is None
else output_hidden_states
)
input_shape = input_ids.shape
input_ids = input_ids.view(-1, input_shape[-1])
# input_ids = OPTDecoder_view_input_ids(
# input_ids=input_ids, input_shape=input_shape
# )
past_key_values_length = 0
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
# TODO: check this?
if attention_mask is None:
attention_mask = torch.ones(
inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device
)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
attention_mask = OPTDecoder_self_prepare_decoder_attention(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
if self.project_in is not None:
inputs_embeds = self.project_in(inputs_embeds)
hidden_states = inputs_embeds + pos_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
OPTDecoder_check_head_mask(head_mask, self.layers)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if self.final_layer_norm is not None:
hidden_states = self.final_layer_norm(hidden_states)
if self.project_out is not None:
hidden_states = self.project_out(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
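# Note: compared with the upstream OPTDecoder, this traceable variant always embeds
# input_ids directly (the inputs_embeds path is commented out), never returns a KV cache
# (next_cache stays None), and does not use the gradient-checkpointing branch.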
class OPTModel(OPTPreTrainedModel):
def __init__(self, config: OPTLoraConfig):
super().__init__(config)
self.decoder = OPTDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.embed_tokens
def set_input_embeddings(self, value):
self.decoder.embed_tokens = value
def get_decoder(self):
return self.decoder
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: torch.Tensor = None,
head_mask: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = True,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = (
self.config.output_attentions
if output_attentions is None
else output_attentions
)
output_hidden_states = (
self.config.output_hidden_states
if output_hidden_states is None
else output_hidden_states
)
return_dict = self.config.return_dict if return_dict is None else return_dict
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs
return BaseModelOutputWithPast(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
)
class OPTForCausalLM(OPTPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.model = OPTModel(config)
# the lm_head weight is automatically tied to the embed tokens weight
self.lm_head = nn.Linear(
config.word_embed_proj_dim, config.vocab_size, bias=False
)
# self.loss_fct = CrossEntropyLoss()
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
)
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: torch.Tensor = None,
labels: torch.LongTensor = None,
head_mask: Optional[torch.Tensor] = None,
# inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, OPTForCausalLM
>>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
return_dict = self.config.return_dict if return_dict is None else return_dict
output_attentions = (
self.config.output_attentions
if output_attentions is None
else output_attentions
)
output_hidden_states = (
self.config.output_hidden_states
if output_hidden_states is None
else output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
# inputs_embeds=inputs_embeds,
)
logits = self.lm_head(outputs[0]).contiguous()
loss = None
if labels is not None:
# # Shift so that tokens < n predict n | loss = OPTForCasualLM_compute_loss( | 13 | 2023-12-18 12:50:53+00:00 | 12k |
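The entry above cuts off `OPTForCausalLM.forward` right at the "shift so that tokens < n predict n" comment; its target next line calls `OPTForCasualLM_compute_loss`, a helper defined elsewhere in that repository whose exact signature is not shown here. As a rough guide only, the sketch below shows the standard shifted next-token cross-entropy that such a helper typically computes (the `-100` ignore index follows the `labels` docstring above); the function name and argument layout are assumptions for illustration, not the repository's API.

```python
# Hedged sketch only: the real OPTForCasualLM_compute_loss may differ in name and arguments.
import torch
import torch.nn.functional as F

def shifted_causal_lm_loss(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """logits: (batch, seq, vocab); labels: (batch, seq) with -100 marking ignored tokens."""
    shift_logits = logits[:, :-1, :].contiguous()  # predictions for positions 0..n-2
    shift_labels = labels[:, 1:].contiguous()      # targets are the tokens at positions 1..n-1
    return F.cross_entropy(
        shift_logits.view(-1, shift_logits.size(-1)),
        shift_labels.view(-1),
        ignore_index=-100,  # matches the labels convention described in the docstring above
    )
```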
byeongjun-park/HarmonyView | ldm/models/diffusion/sync_dreamer.py | [
{
"identifier": "read_pickle",
"path": "ldm/base_utils.py",
"snippet": "def read_pickle(pkl_path):\n with open(pkl_path, 'rb') as f:\n return pickle.load(f)"
},
{
"identifier": "concat_images_list",
"path": "ldm/base_utils.py",
"snippet": "def concat_images_list(*args,vert=False):\n if len(args)==1: return args[0]\n img_out=args[0]\n for img in args[1:]:\n img_out=concat_images(img_out,img,vert)\n return img_out"
},
{
"identifier": "get_warp_coordinates",
"path": "ldm/models/diffusion/sync_dreamer_utils.py",
"snippet": "def get_warp_coordinates(volume_xyz, warp_size, input_size, Ks, warp_pose):\n B, _, D, H, W = volume_xyz.shape\n ratio = warp_size / input_size\n warp_proj = construct_project_matrix(ratio, ratio, Ks, warp_pose) # B,4,4\n warp_coords = project_and_normalize(volume_xyz.view(B,3,D*H*W), warp_proj, warp_size).view(B, D, H, W, 2)\n return warp_coords"
},
{
"identifier": "create_target_volume",
"path": "ldm/models/diffusion/sync_dreamer_utils.py",
"snippet": "def create_target_volume(depth_size, volume_size, input_image_size, pose_target, K, near=None, far=None):\n device, dtype = pose_target.device, pose_target.dtype\n\n # compute a depth range on the unit sphere\n H, W, D, B = volume_size, volume_size, depth_size, pose_target.shape[0]\n if near is not None and far is not None :\n # near, far b,1,h,w\n depth_values = torch.linspace(0, 1, steps=depth_size).to(near.device).to(near.dtype) # d\n depth_values = depth_values.view(1, D, 1, 1) # 1,d,1,1\n depth_values = depth_values * (far - near) + near # b d h w\n depth_values = depth_values.view(B, 1, D, H * W)\n else:\n near, far = near_far_from_unit_sphere_using_camera_poses(pose_target) # b 1\n depth_values = torch.linspace(0, 1, steps=depth_size).to(near.device).to(near.dtype) # d\n depth_values = depth_values[None,:,None] * (far[:,None,:] - near[:,None,:]) + near[:,None,:] # b d 1\n depth_values = depth_values.view(B, 1, D, 1).expand(B, 1, D, H*W)\n\n ratio = volume_size / input_image_size\n\n # creat a grid on the target (reference) view\n # H, W, D, B = volume_size, volume_size, depth_values.shape[1], depth_values.shape[0]\n\n # creat mesh grid: note reference also means target\n ref_grid = create_meshgrid(H, W, normalized_coordinates=False) # (1, H, W, 2)\n ref_grid = ref_grid.to(device).to(dtype)\n ref_grid = ref_grid.permute(0, 3, 1, 2) # (1, 2, H, W)\n ref_grid = ref_grid.reshape(1, 2, H*W) # (1, 2, H*W)\n ref_grid = ref_grid.expand(B, -1, -1) # (B, 2, H*W)\n ref_grid = torch.cat((ref_grid, torch.ones(B, 1, H*W, dtype=ref_grid.dtype, device=ref_grid.device)), dim=1) # (B, 3, H*W)\n ref_grid = ref_grid.unsqueeze(2) * depth_values # (B, 3, D, H*W)\n\n # unproject to space and transfer to world coordinates.\n Ks = K\n ref_proj = construct_project_matrix(ratio, ratio, Ks, pose_target) # B,4,4\n ref_proj_inv = torch.inverse(ref_proj) # B,4,4\n ref_grid = ref_proj_inv[:,:3,:3] @ ref_grid.view(B,3,D*H*W) + ref_proj_inv[:,:3,3:] # B,3,3 @ B,3,DHW + B,3,1 => B,3,DHW\n return ref_grid.reshape(B,3,D,H,W), depth_values.view(B,1,D,H,W)"
},
{
"identifier": "NoisyTargetViewEncoder",
"path": "ldm/models/diffusion/sync_dreamer_network.py",
"snippet": "class NoisyTargetViewEncoder(nn.Module):\n def __init__(self, time_embed_dim, viewpoint_dim, run_dim=16, output_dim=8):\n super().__init__()\n\n self.init_conv = nn.Conv2d(4, run_dim, 3, 1, 1)\n self.out_conv0 = Image2DResBlockWithTV(run_dim, time_embed_dim, viewpoint_dim)\n self.out_conv1 = Image2DResBlockWithTV(run_dim, time_embed_dim, viewpoint_dim)\n self.out_conv2 = Image2DResBlockWithTV(run_dim, time_embed_dim, viewpoint_dim)\n self.final_out = nn.Sequential(\n nn.GroupNorm(8, run_dim),\n nn.SiLU(True),\n nn.Conv2d(run_dim, output_dim, 3, 1, 1)\n )\n\n def forward(self, x, t, v):\n B, DT = t.shape\n t = t.view(B, DT, 1, 1)\n B, DV = v.shape\n v = v.view(B, DV, 1, 1)\n\n x = self.init_conv(x)\n x = self.out_conv0(x, t, v)\n x = self.out_conv1(x, t, v)\n x = self.out_conv2(x, t, v)\n x = self.final_out(x)\n return x"
},
{
"identifier": "SpatialTime3DNet",
"path": "ldm/models/diffusion/sync_dreamer_network.py",
"snippet": "class SpatialTime3DNet(nn.Module):\n def __init__(self, time_dim=256, input_dim=128, dims=(32, 64, 128, 256)):\n super().__init__()\n d0, d1, d2, d3 = dims\n dt = time_dim\n\n self.init_conv = nn.Conv3d(input_dim, d0, 3, 1, 1) # 32\n self.conv0 = SpatialTimeBlock(d0, dt, d0, stride=1)\n\n self.conv1 = SpatialTimeBlock(d0, dt, d1, stride=2)\n self.conv2_0 = SpatialTimeBlock(d1, dt, d1, stride=1)\n self.conv2_1 = SpatialTimeBlock(d1, dt, d1, stride=1)\n\n self.conv3 = SpatialTimeBlock(d1, dt, d2, stride=2)\n self.conv4_0 = SpatialTimeBlock(d2, dt, d2, stride=1)\n self.conv4_1 = SpatialTimeBlock(d2, dt, d2, stride=1)\n\n self.conv5 = SpatialTimeBlock(d2, dt, d3, stride=2)\n self.conv6_0 = SpatialTimeBlock(d3, dt, d3, stride=1)\n self.conv6_1 = SpatialTimeBlock(d3, dt, d3, stride=1)\n\n self.conv7 = SpatialUpTimeBlock(d3, dt, d2)\n self.conv8 = SpatialUpTimeBlock(d2, dt, d1)\n self.conv9 = SpatialUpTimeBlock(d1, dt, d0)\n\n def forward(self, x, t):\n B, C = t.shape\n t = t.view(B, C, 1, 1, 1)\n\n x = self.init_conv(x)\n conv0 = self.conv0(x, t)\n\n x = self.conv1(conv0, t)\n x = self.conv2_0(x, t)\n conv2 = self.conv2_1(x, t)\n\n x = self.conv3(conv2, t)\n x = self.conv4_0(x, t)\n conv4 = self.conv4_1(x, t)\n\n x = self.conv5(conv4, t)\n x = self.conv6_0(x, t)\n x = self.conv6_1(x, t)\n\n x = conv4 + self.conv7(x, t)\n x = conv2 + self.conv8(x, t)\n x = conv0 + self.conv9(x, t)\n return x"
},
{
"identifier": "FrustumTV3DNet",
"path": "ldm/models/diffusion/sync_dreamer_network.py",
"snippet": "class FrustumTV3DNet(nn.Module):\n def __init__(self, in_dim, t_dim, v_dim, dims=(32, 64, 128, 256)):\n super().__init__()\n self.conv0 = nn.Conv3d(in_dim, dims[0], 3, 1, 1) # 32\n\n self.conv1 = FrustumTVBlock(dims[0], t_dim, v_dim, dims[1], 2)\n self.conv2 = FrustumTVBlock(dims[1], t_dim, v_dim, dims[1], 1)\n\n self.conv3 = FrustumTVBlock(dims[1], t_dim, v_dim, dims[2], 2)\n self.conv4 = FrustumTVBlock(dims[2], t_dim, v_dim, dims[2], 1)\n\n self.conv5 = FrustumTVBlock(dims[2], t_dim, v_dim, dims[3], 2)\n self.conv6 = FrustumTVBlock(dims[3], t_dim, v_dim, dims[3], 1)\n\n self.up0 = FrustumTVUpBlock(dims[3], t_dim, v_dim, dims[2])\n self.up1 = FrustumTVUpBlock(dims[2], t_dim, v_dim, dims[1])\n self.up2 = FrustumTVUpBlock(dims[1], t_dim, v_dim, dims[0])\n\n def forward(self, x, t, v):\n B,DT = t.shape\n t = t.view(B,DT,1,1,1)\n B,DV = v.shape\n v = v.view(B,DV,1,1,1)\n\n b, _, d, h, w = x.shape\n x0 = self.conv0(x)\n x1 = self.conv2(self.conv1(x0, t, v), t, v)\n x2 = self.conv4(self.conv3(x1, t, v), t, v)\n x3 = self.conv6(self.conv5(x2, t, v), t, v)\n\n x2 = self.up0(x3, t, v) + x2\n x1 = self.up1(x2, t, v) + x1\n x0 = self.up2(x1, t, v) + x0\n return {w: x0, w//2: x1, w//4: x2, w//8: x3}"
},
{
"identifier": "make_ddim_timesteps",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out"
},
{
"identifier": "timestep_embedding",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "FrozenCLIPImageEmbedder",
"path": "ldm/modules/encoders/modules.py",
"snippet": "class FrozenCLIPImageEmbedder(AbstractEncoder):\n \"\"\"\n Uses the CLIP image encoder.\n Not actually frozen... If you want that set cond_stage_trainable=False in cfg\n \"\"\"\n def __init__(\n self,\n model='ViT-L/14',\n jit=False,\n device='cpu',\n antialias=False,\n ):\n super().__init__()\n self.model, _ = clip.load(name=model, device=device, jit=jit)\n # We don't use the text part so delete it\n del self.model.transformer\n self.antialias = antialias\n self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)\n self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)\n\n def preprocess(self, x):\n # Expects inputs in the range -1, 1\n x = kornia.geometry.resize(x, (224, 224),\n interpolation='bicubic',align_corners=True,\n antialias=self.antialias)\n x = (x + 1.) / 2.\n # renormalize according to clip\n x = kornia.enhance.normalize(x, self.mean, self.std)\n return x\n\n def forward(self, x):\n # x is assumed to be in range [-1,1]\n if isinstance(x, list):\n # [\"\"] denotes condition dropout for ucg\n device = self.model.visual.conv1.weight.device\n return torch.zeros(1, 768, device=device)\n return self.model.encode_image(self.preprocess(x)).float()\n\n def encode(self, im):\n return self(im).unsqueeze(1)"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
}
] | from pathlib import Path
from skimage.io import imsave
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from ldm.base_utils import read_pickle, concat_images_list
from ldm.models.diffusion.sync_dreamer_utils import get_warp_coordinates, create_target_volume
from ldm.models.diffusion.sync_dreamer_network import NoisyTargetViewEncoder, SpatialTime3DNet, FrustumTV3DNet
from ldm.modules.diffusionmodules.util import make_ddim_timesteps, timestep_embedding
from ldm.modules.encoders.modules import FrozenCLIPImageEmbedder
from ldm.util import instantiate_from_config
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np | 7,436 | volume_xyz, volume_depth = create_target_volume(D, self.frustum_volume_size, self.input_image_size, poses_, Ks_, near, far) # B*TN,3 or 1,D,H,W
volume_xyz_ = volume_xyz / self.spatial_volume_length # since the spatial volume is constructed in [-spatial_volume_length,spatial_volume_length]
volume_xyz_ = volume_xyz_.permute(0, 2, 3, 4, 1) # B*TN,D,H,W,3
spatial_volume_ = spatial_volume.unsqueeze(1).repeat(1, TN, 1, 1, 1, 1).view(B * TN, -1, V, V, V)
volume_feats = F.grid_sample(spatial_volume_, volume_xyz_, mode='bilinear', padding_mode='zeros', align_corners=True) # B*TN,C,D,H,W
v_embed_ = v_embed[torch.arange(B)[:,None], target_indices.view(B,TN)].view(B*TN, -1) # B*TN
t_embed_ = t_embed.unsqueeze(1).repeat(1,TN,1).view(B*TN,-1)
volume_feats_dict = self.frustum_volume_feats(volume_feats, t_embed_, v_embed_)
return volume_feats_dict, volume_depth
class SyncMultiviewDiffusion(pl.LightningModule):
def __init__(self, unet_config, scheduler_config,
finetune_unet=False, finetune_projection=True,
view_num=16, image_size=256,
cfg_scale=3.0, output_num=8, batch_view_num=4,
drop_conditions=False, drop_scheme='default',
clip_image_encoder_path="/apdcephfs/private_rondyliu/projects/clip/ViT-L-14.pt",
sample_type='ddim', sample_steps=200):
super().__init__()
self.finetune_unet = finetune_unet
self.finetune_projection = finetune_projection
self.view_num = view_num
self.viewpoint_dim = 4
self.output_num = output_num
self.image_size = image_size
self.batch_view_num = batch_view_num
self.cfg_scale = cfg_scale
self.clip_image_encoder_path = clip_image_encoder_path
self._init_time_step_embedding()
self._init_first_stage()
self._init_schedule()
self._init_multiview()
self._init_clip_image_encoder()
self._init_clip_projection()
self.spatial_volume = SpatialVolumeNet(self.time_embed_dim, self.viewpoint_dim, self.view_num)
self.model = UNetWrapper(unet_config, drop_conditions=drop_conditions, drop_scheme=drop_scheme)
self.scheduler_config = scheduler_config
latent_size = image_size//8
if sample_type=='ddim':
self.sampler = SyncDDIMSampler(self, sample_steps , "uniform", 1.0, latent_size=latent_size)
else:
raise NotImplementedError
def _init_clip_projection(self):
self.cc_projection = nn.Linear(772, 768)
nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
nn.init.zeros_(list(self.cc_projection.parameters())[1])
self.cc_projection.requires_grad_(True)
if not self.finetune_projection:
disable_training_module(self.cc_projection)
def _init_multiview(self):
K, azs, _, _, poses = read_pickle(f'meta_info/camera-{self.view_num}.pkl')
default_image_size = 256
ratio = self.image_size/default_image_size
K = np.diag([ratio,ratio,1]) @ K
K = torch.from_numpy(K.astype(np.float32)) # [3,3]
K = K.unsqueeze(0).repeat(self.view_num,1,1) # N,3,3
poses = torch.from_numpy(poses.astype(np.float32)) # N,3,4
self.register_buffer('poses', poses)
self.register_buffer('Ks', K)
azs = (azs + np.pi) % (np.pi * 2) - np.pi # scale to [-pi,pi] and the index=0 has az=0
self.register_buffer('azimuth', torch.from_numpy(azs.astype(np.float32)))
def get_viewpoint_embedding(self, batch_size, elevation_ref):
"""
@param batch_size:
@param elevation_ref: B
@return:
"""
azimuth_input = self.azimuth[0].unsqueeze(0) # 1
azimuth_target = self.azimuth # N
elevation_input = -elevation_ref # note that zero123 use a negative elevation here!!!
elevation_target = -np.deg2rad(30)
d_e = elevation_target - elevation_input # B
N = self.azimuth.shape[0]
B = batch_size
d_e = d_e.unsqueeze(1).repeat(1, N)
d_a = azimuth_target - azimuth_input # N
d_a = d_a.unsqueeze(0).repeat(B, 1)
d_z = torch.zeros_like(d_a)
embedding = torch.stack([d_e, torch.sin(d_a), torch.cos(d_a), d_z], -1) # B,N,4
return embedding
def _init_first_stage(self):
first_stage_config={
"target": "ldm.models.autoencoder.AutoencoderKL",
"params": {
"embed_dim": 4,
"monitor": "val/rec_loss",
"ddconfig":{
"double_z": True,
"z_channels": 4,
"resolution": self.image_size,
"in_channels": 3,
"out_ch": 3,
"ch": 128,
"ch_mult": [1,2,4,4],
"num_res_blocks": 2,
"attn_resolutions": [],
"dropout": 0.0
},
"lossconfig": {"target": "torch.nn.Identity"},
}
}
self.first_stage_scale_factor = 0.18215
self.first_stage_model = instantiate_from_config(first_stage_config)
self.first_stage_model = disable_training_module(self.first_stage_model)
def _init_clip_image_encoder(self):
|
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def disable_training_module(module: nn.Module):
module = module.eval()
module.train = disabled_train
for para in module.parameters():
para.requires_grad = False
return module
def repeat_to_batch(tensor, B, VN):
t_shape = tensor.shape
ones = [1 for _ in range(len(t_shape)-1)]
tensor_new = tensor.view(B,1,*t_shape[1:]).repeat(1,VN,*ones).view(B*VN,*t_shape[1:])
return tensor_new
class UNetWrapper(nn.Module):
def __init__(self, diff_model_config, drop_conditions=False, drop_scheme='default', use_zero_123=True):
super().__init__()
self.diffusion_model = instantiate_from_config(diff_model_config)
self.drop_conditions = drop_conditions
self.drop_scheme=drop_scheme
self.use_zero_123 = use_zero_123
def drop(self, cond, mask):
shape = cond.shape
B = shape[0]
cond = mask.view(B,*[1 for _ in range(len(shape)-1)]) * cond
return cond
def get_trainable_parameters(self):
return self.diffusion_model.get_trainable_parameters()
def get_drop_scheme(self, B, device):
if self.drop_scheme=='default':
random = torch.rand(B, dtype=torch.float32, device=device)
drop_clip = (random > 0.15) & (random <= 0.2)
drop_volume = (random > 0.1) & (random <= 0.15)
drop_concat = (random > 0.05) & (random <= 0.1)
drop_all = random <= 0.05
else:
raise NotImplementedError
return drop_clip, drop_volume, drop_concat, drop_all
def forward(self, x, t, clip_embed, volume_feats, x_concat, is_train=False):
"""
@param x: B,4,H,W
@param t: B,
@param clip_embed: B,M,768
@param volume_feats: B,C,D,H,W
@param x_concat: B,C,H,W
@param is_train:
@return:
"""
if self.drop_conditions and is_train:
B = x.shape[0]
drop_clip, drop_volume, drop_concat, drop_all = self.get_drop_scheme(B, x.device)
clip_mask = 1.0 - (drop_clip | drop_all).float()
clip_embed = self.drop(clip_embed, clip_mask)
volume_mask = 1.0 - (drop_volume | drop_all).float()
for k, v in volume_feats.items():
volume_feats[k] = self.drop(v, mask=volume_mask)
concat_mask = 1.0 - (drop_concat | drop_all).float()
x_concat = self.drop(x_concat, concat_mask)
if self.use_zero_123:
# zero123 does not multiply this when encoding, maybe a bug for zero123
first_stage_scale_factor = 0.18215
x_concat_ = x_concat * 1.0
x_concat_[:, :4] = x_concat_[:, :4] / first_stage_scale_factor
else:
x_concat_ = x_concat
x = torch.cat([x, x_concat_], 1)
pred = self.diffusion_model(x, t, clip_embed, source_dict=volume_feats)
return pred
def predict_with_unconditional_scale(self, x, t, clip_embed, volume_feats, x_concat, unconditional_scale):
x_ = torch.cat([x] * 2, 0)
t_ = torch.cat([t] * 2, 0)
clip_embed_ = torch.cat([clip_embed, torch.zeros_like(clip_embed)], 0)
v_ = {}
for k, v in volume_feats.items():
v_[k] = torch.cat([v, torch.zeros_like(v)], 0)
x_concat_ = torch.cat([x_concat, torch.zeros_like(x_concat)], 0)
if self.use_zero_123:
# zero123 does not multiply this when encoding, maybe a bug for zero123
first_stage_scale_factor = 0.18215
x_concat_[:, :4] = x_concat_[:, :4] / first_stage_scale_factor
x_ = torch.cat([x_, x_concat_], 1)
s, s_uc = self.diffusion_model(x_, t_, clip_embed_, source_dict=v_).chunk(2)
s = s_uc + unconditional_scale * (s - s_uc)
return s
def predict_with_decomposed_unconditional_scales(self, x, t, clip_embed, volume_feats, x_concat, unconditional_scales):
x_ = torch.cat([x] * 3, 0)
t_ = torch.cat([t] * 3, 0)
clip_embed_ = torch.cat([clip_embed, torch.zeros_like(clip_embed), clip_embed], 0)
x_concat_ = torch.cat([x_concat, torch.zeros_like(x_concat), x_concat*4], 0)
v_ = {}
for k, v in volume_feats.items():
v_[k] = torch.cat([v, v, torch.zeros_like(v)], 0)
if self.use_zero_123:
# zero123 does not multiply this when encoding, maybe a bug for zero123
first_stage_scale_factor = 0.18215
x_concat_[:, :4] = x_concat_[:, :4] / first_stage_scale_factor
x_ = torch.cat([x_, x_concat_], 1)
s, s_uc1, s_uc2 = self.diffusion_model(x_, t_, clip_embed_, source_dict=v_).chunk(3)
s = s + unconditional_scales[0] * (s - s_uc1) + unconditional_scales[1] * (s - s_uc2)
return s
class SpatialVolumeNet(nn.Module):
def __init__(self, time_dim, view_dim, view_num,
input_image_size=256, frustum_volume_depth=48,
spatial_volume_size=32, spatial_volume_length=0.5,
frustum_volume_length=0.86603 # sqrt(3)/2
):
super().__init__()
self.target_encoder = NoisyTargetViewEncoder(time_dim, view_dim, output_dim=16)
self.spatial_volume_feats = SpatialTime3DNet(input_dim=16 * view_num, time_dim=time_dim, dims=(64, 128, 256, 512))
self.frustum_volume_feats = FrustumTV3DNet(64, time_dim, view_dim, dims=(64, 128, 256, 512))
self.frustum_volume_length = frustum_volume_length
self.input_image_size = input_image_size
self.spatial_volume_size = spatial_volume_size
self.spatial_volume_length = spatial_volume_length
self.frustum_volume_size = self.input_image_size // 8
self.frustum_volume_depth = frustum_volume_depth
self.time_dim = time_dim
self.view_dim = view_dim
self.default_origin_depth = 1.5 # our rendered images are 1.5 away from the origin, we assume camera is 1.5 away from the origin
def construct_spatial_volume(self, x, t_embed, v_embed, target_poses, target_Ks):
"""
@param x: B,N,4,H,W
@param t_embed: B,t_dim
@param v_embed: B,N,v_dim
@param target_poses: N,3,4
@param target_Ks: N,3,3
@return:
"""
B, N, _, H, W = x.shape
V = self.spatial_volume_size
device = x.device
spatial_volume_verts = torch.linspace(-self.spatial_volume_length, self.spatial_volume_length, V, dtype=torch.float32, device=device)
spatial_volume_verts = torch.stack(torch.meshgrid(spatial_volume_verts, spatial_volume_verts, spatial_volume_verts, indexing='ij'), -1)
spatial_volume_verts = spatial_volume_verts.reshape(1, V ** 3, 3)[:, :, (2, 1, 0)]
spatial_volume_verts = spatial_volume_verts.view(1, V, V, V, 3).permute(0, 4, 1, 2, 3).repeat(B, 1, 1, 1, 1)
# encode source features
t_embed_ = t_embed.view(B, 1, self.time_dim).repeat(1, N, 1).view(B, N, self.time_dim)
v_embed_ = v_embed
target_Ks = target_Ks.unsqueeze(0).repeat(B, 1, 1, 1)
target_poses = target_poses.unsqueeze(0).repeat(B, 1, 1, 1)
# extract 2D image features
spatial_volume_feats = []
# project source features
for ni in range(0, N):
pose_source_ = target_poses[:, ni]
K_source_ = target_Ks[:, ni]
x_ = self.target_encoder(x[:, ni], t_embed_[:, ni], v_embed_[:, ni])
C = x_.shape[1]
coords_source = get_warp_coordinates(spatial_volume_verts, x_.shape[-1], self.input_image_size, K_source_, pose_source_).view(B, V, V * V, 2)
unproj_feats_ = F.grid_sample(x_, coords_source, mode='bilinear', padding_mode='zeros', align_corners=True)
unproj_feats_ = unproj_feats_.view(B, C, V, V, V)
spatial_volume_feats.append(unproj_feats_)
spatial_volume_feats = torch.stack(spatial_volume_feats, 1) # B,N,C,V,V,V
N = spatial_volume_feats.shape[1]
spatial_volume_feats = spatial_volume_feats.view(B, N*C, V, V, V)
spatial_volume_feats = self.spatial_volume_feats(spatial_volume_feats, t_embed) # b,64,32,32,32
return spatial_volume_feats
def construct_view_frustum_volume(self, spatial_volume, t_embed, v_embed, poses, Ks, target_indices):
"""
@param spatial_volume: B,C,V,V,V
@param t_embed: B,t_dim
@param v_embed: B,N,v_dim
@param poses: N,3,4
@param Ks: N,3,3
@param target_indices: B,TN
@return: B*TN,C,H,W
"""
B, TN = target_indices.shape
H, W = self.frustum_volume_size, self.frustum_volume_size
D = self.frustum_volume_depth
V = self.spatial_volume_size
near = torch.ones(B * TN, 1, H, W, dtype=spatial_volume.dtype, device=spatial_volume.device) * self.default_origin_depth - self.frustum_volume_length
far = torch.ones(B * TN, 1, H, W, dtype=spatial_volume.dtype, device=spatial_volume.device) * self.default_origin_depth + self.frustum_volume_length
target_indices = target_indices.view(B*TN) # B*TN
poses_ = poses[target_indices] # B*TN,3,4
Ks_ = Ks[target_indices] # B*TN,3,4
volume_xyz, volume_depth = create_target_volume(D, self.frustum_volume_size, self.input_image_size, poses_, Ks_, near, far) # B*TN,3 or 1,D,H,W
volume_xyz_ = volume_xyz / self.spatial_volume_length # since the spatial volume is constructed in [-spatial_volume_length,spatial_volume_length]
volume_xyz_ = volume_xyz_.permute(0, 2, 3, 4, 1) # B*TN,D,H,W,3
spatial_volume_ = spatial_volume.unsqueeze(1).repeat(1, TN, 1, 1, 1, 1).view(B * TN, -1, V, V, V)
volume_feats = F.grid_sample(spatial_volume_, volume_xyz_, mode='bilinear', padding_mode='zeros', align_corners=True) # B*TN,C,D,H,W
v_embed_ = v_embed[torch.arange(B)[:,None], target_indices.view(B,TN)].view(B*TN, -1) # B*TN
t_embed_ = t_embed.unsqueeze(1).repeat(1,TN,1).view(B*TN,-1)
volume_feats_dict = self.frustum_volume_feats(volume_feats, t_embed_, v_embed_)
return volume_feats_dict, volume_depth
class SyncMultiviewDiffusion(pl.LightningModule):
def __init__(self, unet_config, scheduler_config,
finetune_unet=False, finetune_projection=True,
view_num=16, image_size=256,
cfg_scale=3.0, output_num=8, batch_view_num=4,
drop_conditions=False, drop_scheme='default',
clip_image_encoder_path="/apdcephfs/private_rondyliu/projects/clip/ViT-L-14.pt",
sample_type='ddim', sample_steps=200):
super().__init__()
self.finetune_unet = finetune_unet
self.finetune_projection = finetune_projection
self.view_num = view_num
self.viewpoint_dim = 4
self.output_num = output_num
self.image_size = image_size
self.batch_view_num = batch_view_num
self.cfg_scale = cfg_scale
self.clip_image_encoder_path = clip_image_encoder_path
self._init_time_step_embedding()
self._init_first_stage()
self._init_schedule()
self._init_multiview()
self._init_clip_image_encoder()
self._init_clip_projection()
self.spatial_volume = SpatialVolumeNet(self.time_embed_dim, self.viewpoint_dim, self.view_num)
self.model = UNetWrapper(unet_config, drop_conditions=drop_conditions, drop_scheme=drop_scheme)
self.scheduler_config = scheduler_config
latent_size = image_size//8
if sample_type=='ddim':
self.sampler = SyncDDIMSampler(self, sample_steps , "uniform", 1.0, latent_size=latent_size)
else:
raise NotImplementedError
def _init_clip_projection(self):
self.cc_projection = nn.Linear(772, 768)
nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
nn.init.zeros_(list(self.cc_projection.parameters())[1])
self.cc_projection.requires_grad_(True)
if not self.finetune_projection:
disable_training_module(self.cc_projection)
def _init_multiview(self):
K, azs, _, _, poses = read_pickle(f'meta_info/camera-{self.view_num}.pkl')
default_image_size = 256
ratio = self.image_size/default_image_size
K = np.diag([ratio,ratio,1]) @ K
K = torch.from_numpy(K.astype(np.float32)) # [3,3]
K = K.unsqueeze(0).repeat(self.view_num,1,1) # N,3,3
poses = torch.from_numpy(poses.astype(np.float32)) # N,3,4
self.register_buffer('poses', poses)
self.register_buffer('Ks', K)
azs = (azs + np.pi) % (np.pi * 2) - np.pi # scale to [-pi,pi] and the index=0 has az=0
self.register_buffer('azimuth', torch.from_numpy(azs.astype(np.float32)))
def get_viewpoint_embedding(self, batch_size, elevation_ref):
"""
@param batch_size:
@param elevation_ref: B
@return:
"""
azimuth_input = self.azimuth[0].unsqueeze(0) # 1
azimuth_target = self.azimuth # N
elevation_input = -elevation_ref # note that zero123 use a negative elevation here!!!
elevation_target = -np.deg2rad(30)
d_e = elevation_target - elevation_input # B
N = self.azimuth.shape[0]
B = batch_size
d_e = d_e.unsqueeze(1).repeat(1, N)
d_a = azimuth_target - azimuth_input # N
d_a = d_a.unsqueeze(0).repeat(B, 1)
d_z = torch.zeros_like(d_a)
embedding = torch.stack([d_e, torch.sin(d_a), torch.cos(d_a), d_z], -1) # B,N,4
return embedding
def _init_first_stage(self):
first_stage_config={
"target": "ldm.models.autoencoder.AutoencoderKL",
"params": {
"embed_dim": 4,
"monitor": "val/rec_loss",
"ddconfig":{
"double_z": True,
"z_channels": 4,
"resolution": self.image_size,
"in_channels": 3,
"out_ch": 3,
"ch": 128,
"ch_mult": [1,2,4,4],
"num_res_blocks": 2,
"attn_resolutions": [],
"dropout": 0.0
},
"lossconfig": {"target": "torch.nn.Identity"},
}
}
self.first_stage_scale_factor = 0.18215
self.first_stage_model = instantiate_from_config(first_stage_config)
self.first_stage_model = disable_training_module(self.first_stage_model)
def _init_clip_image_encoder(self): | self.clip_image_encoder = FrozenCLIPImageEmbedder(model=self.clip_image_encoder_path) | 9 | 2023-12-21 04:44:00+00:00 | 12k |
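In the SyncDreamer/HarmonyView code above, `predict_with_decomposed_unconditional_scales` runs the UNet on three stacked branches (the fully conditioned one plus two differently ablated ones) and recombines them as `s + w1*(s - s_uc1) + w2*(s - s_uc2)`. The snippet below is a minimal, self-contained sketch of just that recombination arithmetic on dummy tensors; the scale values are arbitrary example numbers, and nothing here is the repository's API.

```python
# Minimal sketch of the decomposed classifier-free-guidance arithmetic used above.
import torch

def decomposed_cfg(s: torch.Tensor, s_uc1: torch.Tensor, s_uc2: torch.Tensor,
                   scales=(1.0, 1.0)) -> torch.Tensor:
    w1, w2 = scales  # example values; the method above receives them as unconditional_scales
    return s + w1 * (s - s_uc1) + w2 * (s - s_uc2)

# Toy usage with dummy noise predictions of latent shape (B, 4, H/8, W/8).
s = torch.randn(1, 4, 32, 32)   # fully conditioned prediction
s_uc1 = torch.randn_like(s)     # first ablated branch
s_uc2 = torch.randn_like(s)     # second ablated branch
guided = decomposed_cfg(s, s_uc1, s_uc2, scales=(1.0, 1.0))
```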
OPPOMKLab/u-LLaVA | models/segment_anything/automatic_mask_generator.py | [
{
"identifier": "Sam",
"path": "models/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n\n dtype = masks.dtype\n\n masks = F.interpolate(\n masks.float(),\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n # masks = masks.to(dtype)\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "SamPredictor",
"path": "models/segment_anything/predictor.py",
"snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[\n None, :, :, :\n ]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(\n point_coords, dtype=torch.float, device=self.device\n )\n labels_torch = torch.as_tensor(\n point_labels, dtype=torch.int, device=self.device\n )\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(\n mask_input, dtype=torch.float, device=self.device\n )\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(\n low_res_masks, self.input_size, self.original_size\n )\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert (\n self.features is not None\n ), \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None"
},
{
"identifier": "MaskData",
"path": "models/segment_anything/utils/amg.py",
"snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()"
},
{
"identifier": "area_from_rle",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])"
},
{
"identifier": "batch_iterator",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]"
},
{
"identifier": "batched_mask_to_box",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out"
},
{
"identifier": "box_xyxy_to_xywh",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh"
},
{
"identifier": "build_all_layer_point_grids",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer"
},
{
"identifier": "calculate_stability_score",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions"
},
{
"identifier": "coco_encode_rle",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle"
},
{
"identifier": "generate_crop_boxes",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs"
},
{
"identifier": "is_box_near_crop_edge",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)"
},
{
"identifier": "mask_to_rle_pytorch",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out"
},
{
"identifier": "remove_small_regions",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True"
},
{
"identifier": "rle_to_mask",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order"
},
{
"identifier": "uncrop_boxes_xyxy",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset"
},
{
"identifier": "uncrop_masks",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)"
},
{
"identifier": "uncrop_points",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset"
}
] | from typing import Any, Dict, List, Optional, Tuple
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (MaskData, area_from_rle, batch_iterator,
batched_mask_to_box, box_xyxy_to_xywh,
build_all_layer_point_grids, calculate_stability_score,
coco_encode_rle, generate_crop_boxes,
is_box_near_crop_edge, mask_to_rle_pytorch,
remove_small_regions, rle_to_mask, uncrop_boxes_xyxy,
uncrop_masks, uncrop_points)
from pycocotools import \
mask as mask_utils # type: ignore # noqa: F401
import numpy as np
import torch
import cv2 # type: ignore # noqa: F401 | 10,173 | crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
if min_mask_region_area > 0:
self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [
coco_encode_rle(rle) for rle in mask_data["rles"]
]
elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
else:
mask_data["segmentations"] = mask_data["rles"]
# Write mask records
curr_anns = []
for idx in range(len(mask_data["segmentations"])):
ann = {
"segmentation": mask_data["segmentations"][idx],
"area": area_from_rle(mask_data["rles"][idx]),
"bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
"predicted_iou": mask_data["iou_preds"][idx].item(),
"point_coords": [mask_data["points"][idx].tolist()],
"stability_score": mask_data["stability_score"][idx].item(),
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
}
curr_anns.append(ann)
return curr_anns
def _generate_masks(self, image: np.ndarray) -> MaskData:
orig_size = image.shape[:2]
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculated the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
if min_mask_region_area > 0:
self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [
coco_encode_rle(rle) for rle in mask_data["rles"]
]
elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
else:
mask_data["segmentations"] = mask_data["rles"]
# Write mask records
curr_anns = []
for idx in range(len(mask_data["segmentations"])):
ann = {
"segmentation": mask_data["segmentations"][idx],
"area": area_from_rle(mask_data["rles"][idx]),
"bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
"predicted_iou": mask_data["iou_preds"][idx].item(),
"point_coords": [mask_data["points"][idx].tolist()],
"stability_score": mask_data["stability_score"][idx].item(),
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
}
curr_anns.append(ann)
return curr_anns
def _generate_masks(self, image: np.ndarray) -> MaskData:
orig_size = image.shape[:2] | crop_boxes, layer_idxs = generate_crop_boxes( | 10 | 2023-12-21 08:10:23+00:00 | 12k |
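For reference, the record above defines SamAutomaticMaskGenerator end to end; a minimal usage sketch follows. It assumes the vendored models/segment_anything package exposes the standard sam_model_registry builder and that a ViT-H checkpoint and an input image exist locally — none of these appear in this record, so treat the import path, checkpoint name, and filename as placeholders, not the repository's own entry point.
# Minimal usage sketch for the SamAutomaticMaskGenerator shown above.
# Assumptions (not in the record): sam_model_registry is exported by the vendored
# package, "sam_vit_h.pth" is a local checkpoint, "example.jpg" is a local image.
import cv2
import torch
from models.segment_anything import SamAutomaticMaskGenerator, sam_model_registry

device = "cuda" if torch.cuda.is_available() else "cpu"
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h.pth").to(device)

mask_generator = SamAutomaticMaskGenerator(
    sam,
    points_per_side=32,           # a 32x32 grid of point prompts over the image
    pred_iou_thresh=0.88,         # drop masks the model itself scores as low quality
    stability_score_thresh=0.95,  # drop masks unstable under threshold shifts
    min_mask_region_area=100,     # post-filter tiny islands/holes (needs opencv)
)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8, as generate() expects
masks = mask_generator.generate(image)
print(len(masks))  # each entry holds 'segmentation', 'bbox', 'area', 'predicted_iou', ...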
chinhsuanwu/ifusion | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config, **kwargs):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**kwargs, **config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "VQModelInterface",
"path": "ldm/models/autoencoder.py",
"snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return 
self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n # for i, step in enumerate(iterator):\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, 
i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), 
desc=\"Encoding Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec"
},
{
"identifier": "CrossAttention",
"path": "ldm/modules/attention.py",
"snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities import rank_zero_only
from omegaconf import ListConfig
from ldm.util import (
log_txt_as_img,
exists,
default,
ismap,
isimage,
mean_flat,
count_params,
instantiate_from_config,
)
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import (
normal_kl,
DiagonalGaussianDistribution,
)
from ldm.models.autoencoder import (
VQModelInterface,
IdentityFirstStage,
AutoencoderKL,
)
from ldm.modules.diffusionmodules.util import (
make_beta_schedule,
extract_into_tensor,
noise_like,
)
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.attention import CrossAttention | 10,343 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image_target",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image_target",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key) | count_params(self.model, verbose=True) | 6 | 2023-12-17 12:45:38+00:00 | 12k |
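For reference, in the full ldm implementation (beyond the cropped snippet above) the DDPM constructor goes on to register diffusion buffers such as sqrt_alphas_cumprod that its q_sample method uses. The sketch below reproduces that forward-noising step using only the make_beta_schedule and extract_into_tensor helpers listed in this record's context; it assumes the ldm package is importable and mirrors the standard x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps formulation rather than quoting the class's own (cropped-out) method.
# Sketch of the forward diffusion step q(x_t | x_0), assuming ldm is on the path.
import numpy as np
import torch
from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule

betas = make_beta_schedule("linear", n_timestep=1000, linear_start=1e-4, linear_end=2e-2)
alphas_cumprod = np.cumprod(1.0 - betas, axis=0)

sqrt_alphas_cumprod = torch.tensor(np.sqrt(alphas_cumprod), dtype=torch.float32)
sqrt_one_minus_alphas_cumprod = torch.tensor(np.sqrt(1.0 - alphas_cumprod), dtype=torch.float32)

x0 = torch.randn(4, 3, 64, 64)                      # toy batch standing in for clean images/latents
t = torch.randint(0, 1000, (4,), dtype=torch.long)  # one timestep per sample
noise = torch.randn_like(x0)

# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
x_t = (
    extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0
    + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise
)
print(x_t.shape)  # torch.Size([4, 3, 64, 64])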
wangzhecheng/SkyScript | test_zero_shot_classification.py | [
{
"identifier": "get_cast_dtype",
"path": "src/open_clip/model.py",
"snippet": "def get_cast_dtype(precision: str):\n cast_dtype = None\n if precision == 'bf16':\n cast_dtype = torch.bfloat16\n elif precision == 'fp16':\n cast_dtype = torch.float16\n return cast_dtype"
},
{
"identifier": "trace_model",
"path": "src/open_clip/model.py",
"snippet": "def trace_model(model, batch_size=256, device=torch.device('cpu')):\n model.eval()\n image_size = model.visual.image_size\n example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)\n example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)\n model = torch.jit.trace_module(\n model,\n inputs=dict(\n forward=(example_images, example_text),\n encode_text=(example_text,),\n encode_image=(example_images,)\n ))\n model.visual.image_size = image_size\n return model"
},
{
"identifier": "create_model_and_transforms",
"path": "src/open_clip/factory.py",
"snippet": "def create_model_and_transforms(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool = False,\n force_quick_gelu: bool = False,\n force_custom_text: bool = False,\n force_patch_dropout: Optional[float] = None,\n force_image_size: Optional[Union[int, Tuple[int, int]]] = None,\n pretrained_image: bool = False,\n pretrained_hf: bool = True,\n image_mean: Optional[Tuple[float, ...]] = None,\n image_std: Optional[Tuple[float, ...]] = None,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n cache_dir: Optional[str] = None,\n output_dict: Optional[bool] = None,\n):\n model = create_model(\n model_name,\n pretrained,\n precision=precision,\n device=device,\n jit=jit,\n force_quick_gelu=force_quick_gelu,\n force_custom_text=force_custom_text,\n force_patch_dropout=force_patch_dropout,\n force_image_size=force_image_size,\n pretrained_image=pretrained_image,\n pretrained_hf=pretrained_hf,\n cache_dir=cache_dir,\n output_dict=output_dict,\n )\n\n image_mean = image_mean or getattr(model.visual, 'image_mean', None)\n image_std = image_std or getattr(model.visual, 'image_std', None)\n preprocess_train = image_transform(\n model.visual.image_size,\n is_train=True,\n mean=image_mean,\n std=image_std,\n aug_cfg=aug_cfg,\n )\n preprocess_val = image_transform(\n model.visual.image_size,\n is_train=False,\n mean=image_mean,\n std=image_std,\n )\n\n return model, preprocess_train, preprocess_val"
},
{
"identifier": "zero_shot_classifier",
"path": "src/training/zero_shot.py",
"snippet": "def zero_shot_classifier(model, classnames, templates, args):\n tokenizer = get_tokenizer(args.model)\n with torch.no_grad():\n zeroshot_weights = []\n for classname in tqdm(classnames):\n texts = [template(classname) for template in templates] # format with class\n texts = tokenizer(texts).to(args.device) # tokenize\n if args.distributed and not args.horovod:\n class_embeddings = model.module.encode_text(texts)\n else:\n class_embeddings = model.encode_text(texts)\n class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0)\n class_embedding /= class_embedding.norm()\n zeroshot_weights.append(class_embedding)\n zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device)\n return zeroshot_weights"
},
{
"identifier": "setup_logging",
"path": "src/training/logger.py",
"snippet": "def setup_logging(log_file, level, include_host=False):\n if include_host:\n import socket\n hostname = socket.gethostname()\n formatter = logging.Formatter(\n f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')\n else:\n formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')\n\n logging.root.setLevel(level)\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n for logger in loggers:\n logger.setLevel(level)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logging.root.addHandler(stream_handler)\n\n if log_file:\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(formatter)\n logging.root.addHandler(file_handler)"
},
{
"identifier": "is_master",
"path": "src/training/distributed.py",
"snippet": "def is_master(args, local=False):\n return is_local_master(args) if local else is_global_master(args)"
},
{
"identifier": "init_distributed_device",
"path": "src/training/distributed.py",
"snippet": "def init_distributed_device(args):\n # Distributed training = training on more than one GPU.\n # Works in both single and multi-node scenarios.\n args.distributed = False\n args.world_size = 1\n args.rank = 0 # global rank\n args.local_rank = 0\n if args.horovod:\n assert hvd is not None, \"Horovod is not installed\"\n hvd.init()\n args.local_rank = int(hvd.local_rank())\n args.rank = hvd.rank()\n args.world_size = hvd.size()\n args.distributed = True\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n elif is_using_distributed():\n if 'SLURM_PROCID' in os.environ:\n # DDP via SLURM\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n # SLURM var -> torch.distributed vars in case needed\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n else:\n # DDP via torchrun, torch.distributed.launch\n args.local_rank, _, _ = world_info_from_env()\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url)\n args.world_size = torch.distributed.get_world_size()\n args.rank = torch.distributed.get_rank()\n args.distributed = True\n\n if torch.cuda.is_available():\n if args.distributed and not args.no_set_device_rank:\n device = 'cuda:%d' % args.local_rank\n else:\n device = 'cuda:0'\n torch.cuda.set_device(device)\n else:\n device = 'cpu'\n args.device = device\n device = torch.device(device)\n return device"
},
{
"identifier": "broadcast_object",
"path": "src/training/distributed.py",
"snippet": "def broadcast_object(args, obj, src=0):\n # broadcast a pickle-able python object from rank-0 to all ranks\n if args.horovod:\n return hvd.broadcast_object(obj, root_rank=src)\n else:\n if args.rank == src:\n objects = [obj]\n else:\n objects = [None]\n dist.broadcast_object_list(objects, src=src)\n return objects[0]"
},
{
"identifier": "get_autocast",
"path": "src/training/precision.py",
"snippet": "def get_autocast(precision):\n if precision == 'amp':\n return torch.cuda.amp.autocast\n elif precision == 'amp_bfloat16' or precision == 'amp_bf16':\n # amp_bfloat16 is more stable than amp float16 for clip training\n return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)\n else:\n return suppress"
},
{
"identifier": "parse_args",
"path": "params.py",
"snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--root-data-dir\",\n type=str,\n default=None,\n help=\"Root directory to datasets\",\n )\n parser.add_argument(\n \"--train-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with training data. When using webdataset, multiple datasources can be combined using the `::` separator.\",\n )\n parser.add_argument(\n \"--train-data-upsampling-factors\",\n type=str,\n default=None,\n help=(\n \"When using multiple data sources with webdataset and sampling with replacement, this can be used to upsample specific data sources. \"\n \"Similar to --train-data, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) \"\n \"By default, datapoints are sampled uniformly regardless of the dataset sizes.\"\n )\n )\n parser.add_argument(\n \"--val-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with validation data\",\n )\n parser.add_argument(\n \"--train-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Required for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--val-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Useful for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--dataset-type\",\n choices=[\"webdataset\", \"csv\", \"synthetic\", \"auto\"],\n default=\"auto\",\n help=\"Which type of dataset to process.\"\n )\n parser.add_argument(\n \"--dataset-resampled\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use sampling with replacement for webdataset shard selection.\"\n )\n parser.add_argument(\n \"--csv-separator\",\n type=str,\n default=\"\\t\",\n help=\"For csv-like datasets, which separator to use.\"\n )\n parser.add_argument(\n \"--csv-img-key\",\n type=str,\n default=\"filepath\",\n help=\"For csv-like datasets, the name of the key for the image paths.\"\n )\n parser.add_argument(\n \"--csv-caption-key\",\n type=str,\n default=\"title\",\n help=\"For csv-like datasets, the name of the key for the captions.\"\n )\n parser.add_argument(\n \"--imagenet-val\",\n type=str,\n default=None,\n help=\"Path to imagenet val set for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--imagenet-v2\",\n type=str,\n default=None,\n help=\"Path to imagenet v2 for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--logs\",\n type=str,\n default=\"./logs/\",\n help=\"Where to store tensorboard logs. Use None to avoid storing logs.\",\n )\n parser.add_argument(\n \"--log-local\",\n action=\"store_true\",\n default=False,\n help=\"log files on local master, otherwise global master only.\",\n )\n parser.add_argument(\n \"--name\",\n type=str,\n default=None,\n help=\"Optional identifier for the experiment when storing logs. 
Otherwise use current time.\",\n )\n parser.add_argument(\n \"--workers\", type=int, default=1, help=\"Number of dataloader workers per GPU.\"\n )\n parser.add_argument(\n \"--batch-size\", type=int, default=64, help=\"Batch size per GPU.\"\n )\n parser.add_argument(\n \"--epochs\", type=int, default=32, help=\"Number of epochs to train for.\"\n )\n parser.add_argument(\n \"--epochs-cooldown\", type=int, default=None,\n help=\"When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards.\"\n )\n parser.add_argument(\"--lr\", type=float, default=None, help=\"Learning rate.\")\n parser.add_argument(\"--beta1\", type=float, default=None, help=\"Adam beta 1.\")\n parser.add_argument(\"--beta2\", type=float, default=None, help=\"Adam beta 2.\")\n parser.add_argument(\"--eps\", type=float, default=None, help=\"Adam epsilon.\")\n parser.add_argument(\"--wd\", type=float, default=0.2, help=\"Weight decay.\")\n parser.add_argument(\n \"--warmup\", type=int, default=10000, help=\"Number of steps to warmup for.\"\n )\n parser.add_argument(\n \"--use-bn-sync\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use batch norm sync.\")\n parser.add_argument(\n \"--skip-scheduler\",\n action=\"store_true\",\n default=False,\n help=\"Use this flag to skip the learning rate decay.\",\n )\n parser.add_argument(\n \"--lr-scheduler\",\n type=str,\n default='cosine',\n help=\"LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine\",\n )\n parser.add_argument(\n \"--lr-cooldown-end\", type=float, default=0.0,\n help=\"End learning rate for cooldown schedule. Default: 0\"\n )\n parser.add_argument(\n \"--lr-cooldown-power\", type=float, default=1.0,\n help=\"Power for polynomial cooldown schedule. 
Default: 1.0 (linear decay)\"\n )\n parser.add_argument(\n \"--save-frequency\", type=int, default=1, help=\"How often to save checkpoints.\"\n )\n parser.add_argument(\n \"--save-most-recent\",\n action=\"store_true\",\n default=False,\n help=\"Always save the most recent model trained to epoch_latest.pt.\",\n )\n parser.add_argument(\n \"--zeroshot-frequency\", type=int, default=2, help=\"How often to run zero shot.\"\n )\n parser.add_argument(\n \"--val-frequency\", type=int, default=1, help=\"How often to run evaluation with val data.\"\n )\n parser.add_argument(\n \"--resume\",\n default=None,\n type=str,\n help=\"path to latest checkpoint (default: none)\",\n )\n parser.add_argument(\n \"--precision\",\n choices=[\"amp\", \"amp_bf16\", \"amp_bfloat16\", \"bf16\", \"fp16\", \"fp32\"],\n default=\"amp\",\n help=\"Floating point precision.\"\n )\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"RN50\",\n help=\"Name of the vision backbone to use.\",\n )\n parser.add_argument(\n \"--pretrained\",\n default='',\n type=str,\n help=\"Use a pretrained CLIP model weights with the specified tag or file path.\",\n )\n parser.add_argument(\n \"--pretrained-image\",\n default=False,\n action='store_true',\n help=\"Load imagenet pretrained weights for image tower backbone if available.\",\n )\n parser.add_argument(\n \"--lock-image\",\n default=False,\n action='store_true',\n help=\"Lock full image tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-image-unlocked-groups\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-image-freeze-bn-stats\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n '--image-mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override default image mean value of dataset')\n parser.add_argument(\n '--image-std', type=float, nargs='+', default=None, metavar='STD',\n help='Override default image std deviation of of dataset')\n parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)\n parser.add_argument(\n \"--grad-checkpointing\",\n default=False,\n action='store_true',\n help=\"Enable gradient checkpointing.\",\n )\n parser.add_argument(\n \"--local-loss\",\n default=False,\n action=\"store_true\",\n help=\"calculate loss w/ local features @ global (instead of realizing full global @ global matrix)\"\n )\n parser.add_argument(\n \"--gather-with-grad\",\n default=False,\n action=\"store_true\",\n help=\"enable full distributed gradient for feature gather\"\n )\n parser.add_argument(\n '--force-image-size', type=int, nargs='+', default=None,\n help='Override default image size'\n )\n parser.add_argument(\n \"--force-quick-gelu\",\n default=False,\n action='store_true',\n help=\"Force use of QuickGELU activation for non-OpenAI transformer models.\",\n )\n parser.add_argument(\n \"--force-patch-dropout\",\n default=None,\n type=float,\n help=\"Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper\",\n )\n parser.add_argument(\n \"--force-custom-text\",\n default=False,\n action='store_true',\n help=\"Force use of CustomTextCLIP model (separate text-tower).\",\n )\n parser.add_argument(\n \"--torchscript\",\n default=False,\n action='store_true',\n help=\"torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'\",\n )\n parser.add_argument(\n 
\"--trace\",\n default=False,\n action='store_true',\n help=\"torch.jit.trace the model for inference / eval only\",\n )\n parser.add_argument(\n \"--accum-freq\", type=int, default=1, help=\"Update the model every --acum-freq steps.\"\n )\n # arguments for distributed training\n parser.add_argument(\n \"--dist-url\",\n default=\"env://\",\n type=str,\n help=\"url used to set up distributed training\",\n )\n parser.add_argument(\n \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n )\n parser.add_argument(\n \"--report-to\",\n default='',\n type=str,\n help=\"Options are ['wandb', 'tensorboard', 'wandb,tensorboard']\"\n )\n parser.add_argument(\n \"--wandb-notes\",\n default='',\n type=str,\n help=\"Notes if logging with wandb\"\n )\n parser.add_argument(\n \"--wandb-project-name\",\n type=str,\n default='open-clip',\n help=\"Name of the project if logging with wandb.\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"If true, more information is logged.\"\n )\n parser.add_argument(\n \"--copy-codebase\",\n default=False,\n action=\"store_true\",\n help=\"If true, we copy the entire base on the log directory, and execute from there.\"\n )\n parser.add_argument(\n \"--horovod\",\n default=False,\n action=\"store_true\",\n help=\"Use horovod for distributed training.\"\n )\n parser.add_argument(\n \"--ddp-static-graph\",\n default=False,\n action='store_true',\n help=\"Enable static graph optimization for DDP in PyTorch >= 1.11.\",\n )\n parser.add_argument(\n \"--no-set-device-rank\",\n default=False,\n action=\"store_true\",\n help=\"Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).\"\n )\n parser.add_argument(\n \"--seed\", type=int, default=0, help=\"Default random seed.\"\n )\n parser.add_argument(\n \"--grad-clip-norm\", type=float, default=None, help=\"Gradient clip.\"\n )\n parser.add_argument(\n \"--lock-text\",\n default=False,\n action='store_true',\n help=\"Lock full text tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-text-unlocked-layers\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-text-freeze-layer-norm\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n \"--log-every-n-steps\",\n type=int,\n default=100,\n help=\"Log every n steps to tensorboard/console/wandb.\",\n )\n parser.add_argument(\n \"--coca-caption-loss-weight\",\n type=float,\n default=2.0,\n help=\"Weight assigned to caption loss in CoCa.\"\n )\n parser.add_argument(\n \"--coca-contrastive-loss-weight\",\n type=float,\n default=1.0,\n help=\"Weight assigned to contrastive loss when training CoCa.\"\n )\n parser.add_argument(\n \"--remote-sync\",\n type=str,\n default=None,\n help=\"Optinoally sync with a remote path specified by this arg\",\n )\n parser.add_argument(\n \"--remote-sync-frequency\",\n type=int,\n default=300,\n help=\"How frequently to sync to a remote directly if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--remote-sync-protocol\",\n choices=[\"s3\", \"fsspec\"],\n default=\"s3\",\n help=\"How to do the remote sync backup if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--delete-previous-checkpoint\",\n default=False,\n action=\"store_true\",\n help=\"If true, delete previous checkpoint after storing a new one.\"\n )\n parser.add_argument(\n 
\"--distill-model\",\n default=None,\n help='Which model arch to distill from, if any.'\n )\n parser.add_argument(\n \"--distill-pretrained\",\n default=None,\n help='Which pre-trained weights to distill from, if any.'\n )\n # newly added flag for adding random rotation into data augmentation\n parser.add_argument(\n \"--random-rotation\",\n action=\"store_true\",\n default=False,\n help=\"If True, add random rotation into image transform for data augmentation (only for training).\"\n )\n # newly added for testing zero-shot and linear probe classification (custom dataset)\n parser.add_argument(\n \"--datasets-for-testing\",\n nargs='*',\n type=str,\n default=None,\n help=\"A list of names of datasets for testing zero-shot classification testing\",\n )\n parser.add_argument(\n \"--classification-mode\",\n type=str,\n default=\"multiclass\",\n help=\"Choose either binary or multiclass\",\n )\n parser.add_argument(\n \"--test-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with test data (e.g., for testing zero-shot classification)\",\n )\n parser.add_argument(\n \"--classnames\",\n type=str,\n default=None,\n help=\"Path to txt file containing class names\",\n )\n parser.add_argument(\n \"--test-data-name\",\n type=str,\n default=None,\n help=\"The name of the test data (e.g., RSICD, EuroSat)\",\n )\n parser.add_argument(\n \"--csv-class-key\",\n type=str,\n default=\"label\",\n help=\"For csv-like datasets, the name of the key for image labels (for classification).\"\n )\n parser.add_argument(\n \"--csv-actual-label-key\",\n type=str,\n default=\"binary\",\n help=\"If classification_model=binary, then specify the name of the key for actual binary labels (i.e., 0/1).\"\n )\n parser.add_argument(\n \"--alpha\",\n type=float,\n default=None,\n help=\"The regularization multiplier of logistic regression to try for linear probing. If None, do a search.\"\n )\n parser.add_argument(\n \"--samples-per-class\",\n type=str,\n default=None,\n help=\"Numbers of samples per class to train logistic regression for linear probing. If None, use full dataset.\"\n )\n parser.add_argument(\n \"--test-result-save-path\",\n type=str,\n default=None,\n help=\"The path to save test results as a pickle file.\"\n )\n parser.add_argument(\n \"--debugging\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use debugging mode, which will return more information.\"\n )\n \n args = parser.parse_args(args)\n\n # If some params are not passed, we use the default values based on model name.\n default_params = get_default_params(args.model)\n for name, val in default_params.items():\n if getattr(args, name) is None:\n setattr(args, name, val)\n\n return args"
},
{
"identifier": "template_dict",
"path": "prompt_templates.py",
"snippet": ""
},
{
"identifier": "BENCHMARK_DATASET_INFOMATION",
"path": "benchmark_dataset_info.py",
"snippet": "BENCHMARK_DATASET_INFOMATION = {\n 'aid': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/aid/aid_img_txt_pairs_test.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/aid/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n 'eurosat': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/eurosat/eurosat_img_txt_pairs_test.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/eurosat/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n 'fmow': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/fmow/fmow_img_txt_pairs_val.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/fmow/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n 'nwpu': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/nwpu/img_txt_pairs_train.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/nwpu/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n 'patternnet': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/patternnet/img_txt_pairs_train.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/patternnet/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n 'SkyScript_cls': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/SkyScript_cls/img_txt_pairs_val.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/SkyScript_cls/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n 'millionaid': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/millionaid/img_txt_pairs_train.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/millionaid/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n 'rsicb': {\n 'classification_mode': 'multiclass',\n 'test_data': BENCHMARK_DATASET_ROOT_DIR + '/rsicb256/img_txt_pairs_train.csv',\n 'classnames': BENCHMARK_DATASET_ROOT_DIR + '/rsicb256/classnames.txt',\n 'csv_separator': ',',\n 'csv_img_key': 'filepath',\n 'csv_class_key': 'label',\n },\n}"
}
] | import torch
import numpy as np
import os
import sys
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import torch.nn.functional as F
import random
from PIL import Image
from os.path import join, exists
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from src.open_clip.model import get_cast_dtype, trace_model
from src.open_clip.factory import create_model_and_transforms
from src.training.zero_shot import zero_shot_classifier
from src.training.logger import setup_logging
from src.training.distributed import is_master, init_distributed_device, broadcast_object
from src.training.precision import get_autocast
from params import parse_args
from prompt_templates import template_dict
from benchmark_dataset_info import BENCHMARK_DATASET_INFOMATION | 7,681 |
Image.MAX_IMAGE_PIXELS = 1000000000
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
class CsvDatasetForClassification(Dataset):
"""Dataset for multiclass classification"""
def __init__(self, input_filename, transforms, img_key, label_key, classnames, sep="\t", debugging=False, root_data_dir=None):
# logging.debug(f'Loading csv data from {input_filename}.')
df = pd.read_csv(input_filename, sep=sep)
df = df[df[label_key].isnull() == False]
if root_data_dir is not None:
df[img_key] = df[img_key].apply(lambda x: join(root_data_dir, x))
self.images = df[img_key].tolist()
self.labels = df[label_key].tolist()
self.transforms = transforms
self.debugging = debugging
# mapping classname to class index
if type(self.labels[0]) == str:
self.label2idx = {x: i for i, x in enumerate(classnames)}
self.label_indices = [self.label2idx[x] for x in self.labels]
else:
self.idx2label = {i: x for i, x in enumerate(classnames)}
self.label_indices = self.labels
# logging.debug('Done loading data.')
def __len__(self):
return len(self.label_indices)
def __getitem__(self, idx):
images = self.transforms(Image.open(str(self.images[idx])))
if self.debugging:
return images, self.label_indices[idx], self.images[idx]
else:
return images, self.label_indices[idx]
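# Illustrative usage sketch (hypothetical file name; assumes a CLIP-style `preprocess`
# transform and a comma-separated CSV with 'filepath'/'label' columns):
#   dataset = CsvDatasetForClassification('test.csv', preprocess, 'filepath', 'label',
#                                         classnames, sep=',')
#   loader = DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2)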
class CsvDatasetForClassificationBinary(Dataset):
"""Dataset for binary classification"""
def __init__(self, input_filename, transforms, img_key, label_key, actual_label_key, classnames, sep="\t",
debugging=False, root_data_dir=None):
# logging.debug(f'Loading csv data from {input_filename}.')
df = pd.read_csv(input_filename, sep=sep)
df = df[df[label_key].isnull() == False]
if root_data_dir is not None:
df[img_key] = df[img_key].apply(lambda x: join(root_data_dir, x))
self.images = df[img_key].tolist()
self.labels = df[label_key].tolist()
self.actual_labels = df[actual_label_key].tolist()
self.transforms = transforms
self.debugging = debugging
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
images = self.transforms(Image.open(str(self.images[idx])))
if self.debugging:
return images, self.actual_labels[idx], self.images[idx]
else:
return images, self.actual_labels[idx]
def test_zero_shot_classification(model, dataloader, label_list, is_binary, args, dataset_name='unnamed', debugging=False):
# logging.info('Starting zero-shot classification test.')
templates = template_dict[dataset_name]
model.eval()
classifier = zero_shot_classifier(model, label_list, templates, args) # [dim_embedding, N_class]
if is_binary:
results = run_binary(model, classifier, dataloader, args, dataset_name=dataset_name, debugging=debugging)
else:
results = run(model, classifier, dataloader, args, dataset_name=dataset_name, debugging=debugging)
return results
def accuracy(output, target, topk=(1,)):
pred = output.topk(max(topk), 1, True, True)[1].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
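# Note: `accuracy` returns raw counts of correct top-k predictions for a batch; callers
# typically accumulate these counts over the dataloader and divide by the number of samples
# to obtain top-k accuracy.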
def run(model, classifier, dataloader, args, dataset_name='unnamed', debugging=False):
|
Image.MAX_IMAGE_PIXELS = 1000000000
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
class CsvDatasetForClassification(Dataset):
"""Dataset for multiclass classification"""
def __init__(self, input_filename, transforms, img_key, label_key, classnames, sep="\t", debugging=False, root_data_dir=None):
# logging.debug(f'Loading csv data from {input_filename}.')
df = pd.read_csv(input_filename, sep=sep)
df = df[df[label_key].isnull() == False]
if root_data_dir is not None:
df[img_key] = df[img_key].apply(lambda x: join(root_data_dir, x))
self.images = df[img_key].tolist()
self.labels = df[label_key].tolist()
self.transforms = transforms
self.debugging = debugging
# mapping classname to class index
if type(self.labels[0]) == str:
self.label2idx = {x: i for i, x in enumerate(classnames)}
self.label_indices = [self.label2idx[x] for x in self.labels]
else:
self.idx2label = {i: x for i, x in enumerate(classnames)}
self.label_indices = self.labels
# logging.debug('Done loading data.')
def __len__(self):
return len(self.label_indices)
def __getitem__(self, idx):
images = self.transforms(Image.open(str(self.images[idx])))
if self.debugging:
return images, self.label_indices[idx], self.images[idx]
else:
return images, self.label_indices[idx]
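# Illustrative usage sketch (hypothetical file name; assumes a CLIP-style `preprocess`
# transform and a comma-separated CSV with 'filepath'/'label' columns):
#   dataset = CsvDatasetForClassification('test.csv', preprocess, 'filepath', 'label',
#                                         classnames, sep=',')
#   loader = DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2)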
class CsvDatasetForClassificationBinary(Dataset):
"""Dataset for binary classification"""
def __init__(self, input_filename, transforms, img_key, label_key, actual_label_key, classnames, sep="\t",
debugging=False, root_data_dir=None):
# logging.debug(f'Loading csv data from {input_filename}.')
df = pd.read_csv(input_filename, sep=sep)
df = df[df[label_key].isnull() == False]
if root_data_dir is not None:
df[img_key] = df[img_key].apply(lambda x: join(root_data_dir, x))
self.images = df[img_key].tolist()
self.labels = df[label_key].tolist()
self.actual_labels = df[actual_label_key].tolist()
self.transforms = transforms
self.debugging = debugging
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
images = self.transforms(Image.open(str(self.images[idx])))
if self.debugging:
return images, self.actual_labels[idx], self.images[idx]
else:
return images, self.actual_labels[idx]
def test_zero_shot_classification(model, dataloader, label_list, is_binary, args, dataset_name='unnamed', debugging=False):
# logging.info('Starting zero-shot classification test.')
templates = template_dict[dataset_name]
model.eval()
classifier = zero_shot_classifier(model, label_list, templates, args) # [dim_embedding, N_class]
if is_binary:
results = run_binary(model, classifier, dataloader, args, dataset_name=dataset_name, debugging=debugging)
else:
results = run(model, classifier, dataloader, args, dataset_name=dataset_name, debugging=debugging)
return results
def accuracy(output, target, topk=(1,)):
pred = output.topk(max(topk), 1, True, True)[1].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
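# Note: `accuracy` returns raw counts of correct top-k predictions for a batch; callers
# typically accumulate these counts over the dataloader and divide by the number of samples
# to obtain top-k accuracy.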
def run(model, classifier, dataloader, args, dataset_name='unnamed', debugging=False): | autocast = get_autocast(args.precision) | 8 | 2023-12-19 11:50:56+00:00 | 12k |
Lavreniuk/EVP | depth/models_depth/model.py | [
{
"identifier": "UNetWrapper",
"path": "evp/models.py",
"snippet": "class UNetWrapper(nn.Module):\n def __init__(self, unet, use_attn=True, base_size=512, max_attn_size=None, attn_selector='up_cross+down_cross') -> None:\n super().__init__()\n self.unet = unet\n self.attention_store = AttentionStore(base_size=base_size // 8, max_size=max_attn_size)\n self.size16 = base_size // 32\n self.size32 = base_size // 16\n self.size64 = base_size // 8\n self.use_attn = use_attn\n if self.use_attn:\n register_attention_control(unet, self.attention_store)\n register_hier_output(unet)\n self.attn_selector = attn_selector.split('+')\n\n def forward(self, *args, **kwargs):\n if self.use_attn:\n self.attention_store.reset()\n out_list = self.unet(*args, **kwargs)\n if self.use_attn:\n avg_attn = self.attention_store.get_average_attention()\n attn16, attn32, attn64 = self.process_attn(avg_attn)\n out_list[1] = torch.cat([out_list[1], attn16], dim=1)\n out_list[2] = torch.cat([out_list[2], attn32], dim=1)\n if attn64 is not None:\n out_list[3] = torch.cat([out_list[3], attn64], dim=1)\n return out_list[::-1]\n\n def process_attn(self, avg_attn):\n attns = {self.size16: [], self.size32: [], self.size64: []}\n for k in self.attn_selector:\n for up_attn in avg_attn[k]:\n size = int(math.sqrt(up_attn.shape[1]))\n attns[size].append(rearrange(up_attn, 'b (h w) c -> b c h w', h=size))\n attn16 = torch.stack(attns[self.size16]).mean(0)\n attn32 = torch.stack(attns[self.size32]).mean(0)\n if len(attns[self.size64]) > 0:\n attn64 = torch.stack(attns[self.size64]).mean(0)\n else:\n attn64 = None\n return attn16, attn32, attn64"
},
{
"identifier": "TextAdapterRefer",
"path": "evp/models.py",
"snippet": "class TextAdapterRefer(nn.Module):\n def __init__(self, text_dim=768):\n super().__init__()\n \n self.fc = nn.Sequential(\n nn.Linear(text_dim, text_dim),\n nn.GELU(),\n nn.Linear(text_dim, text_dim)\n )\n\n def forward(self, latents, texts, gamma):\n texts_after = self.fc(texts)\n texts = texts + gamma * texts_after\n return texts"
},
{
"identifier": "FrozenCLIPEmbedder",
"path": "evp/models.py",
"snippet": "class FrozenCLIPEmbedder(nn.Module):\n \"\"\"Uses the CLIP transformer encoder for text (from Hugging Face)\"\"\"\n def __init__(self, version=\"openai/clip-vit-large-patch14\", device=\"cuda\", max_length=77, pool=True):\n super().__init__()\n self.tokenizer = CLIPTokenizer.from_pretrained(version)\n self.transformer = CLIPTextModel.from_pretrained(version)\n self.device = device\n self.max_length = max_length\n self.freeze()\n\n self.pool = pool\n\n def freeze(self):\n self.transformer = self.transformer.eval()\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, text):\n batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,\n return_overflowing_tokens=False, padding=\"max_length\", return_tensors=\"pt\")\n tokens = batch_encoding[\"input_ids\"].to(self.device)\n outputs = self.transformer(input_ids=tokens)\n\n if self.pool:\n z = outputs.pooler_output\n else:\n z = outputs.last_hidden_state\n return z\n\n def encode(self, text):\n return self(text)"
},
{
"identifier": "mViT",
"path": "depth/models_depth/miniViT.py",
"snippet": "class mViT(nn.Module):\n def __init__(self, in_channels, n_query_channels=128, patch_size=16, dim_out=256,\n embedding_dim=128, num_heads=4, norm='linear'):\n super(mViT, self).__init__()\n self.norm = norm\n self.n_query_channels = n_query_channels\n self.patch_transformer = PatchTransformerEncoder(in_channels, patch_size, embedding_dim, num_heads)\n self.dot_product_layer = PixelWiseDotProduct()\n\n self.conv3x3 = nn.Conv2d(in_channels, embedding_dim, kernel_size=3, stride=1, padding=1)\n self.regressor = nn.Sequential(nn.Linear(embedding_dim, 256),\n nn.LeakyReLU(),\n nn.Linear(256, 256),\n nn.LeakyReLU(),\n nn.Linear(256, dim_out))\n\n def forward(self, x):\n # n, c, h, w = x.size()\n tgt = self.patch_transformer(x.clone()) # .shape = S, N, E\n\n x = self.conv3x3(x)\n\n regression_head, queries = tgt[0, ...], tgt[1:self.n_query_channels + 1, ...]\n\n # Change from S, N, E to N, S, E\n queries = queries.permute(1, 0, 2)\n range_attention_maps = self.dot_product_layer(x, queries) # .shape = n, n_query_channels, h, w\n\n y = self.regressor(regression_head) # .shape = N, dim_out\n if self.norm == 'linear':\n y = torch.relu(y)\n eps = 0.1\n y = y + eps\n elif self.norm == 'softmax':\n return torch.softmax(y, dim=1), range_attention_maps\n else:\n y = torch.sigmoid(y)\n y = y / y.sum(dim=1, keepdim=True)\n return y, range_attention_maps"
},
{
"identifier": "AttractorLayer",
"path": "depth/models_depth/attractor.py",
"snippet": "class AttractorLayer(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n eps = 1e-3\n A = A + eps\n n, c, h, w = A.shape\n A = A.view(n, self.n_attractors, 2, h, w)\n A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w\n A_normed = A[:, :, 0, ...] # n, na, h, w\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(dist(A_normed.unsqueeze(\n 2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n # .shape N, nbins, h, w\n delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = (self.max_depth - self.min_depth) * \\\n b_new_centers + self.min_depth\n B_centers, _ = torch.sort(B_centers, dim=1)\n B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)\n return b_new_centers, B_centers"
},
{
"identifier": "AttractorLayerUnnormed",
"path": "depth/models_depth/attractor.py",
"snippet": "class AttractorLayerUnnormed(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are unbounded\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n n, c, h, w = A.shape\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(\n dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n delta_c += dist(A[:, i, ...].unsqueeze(1) -\n b_centers) # .shape N, nbins, h, w\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = b_new_centers\n\n return b_new_centers, B_centers"
},
{
"identifier": "ConditionalLogBinomial",
"path": "depth/models_depth/dist_layers.py",
"snippet": "class ConditionalLogBinomial(nn.Module):\n def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):\n \"\"\"Conditional Log Binomial distribution\n\n Args:\n in_features (int): number of input channels in main feature\n condition_dim (int): number of input channels in condition feature\n n_classes (int, optional): Number of classes. Defaults to 256.\n bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.\n p_eps (float, optional): small eps value. Defaults to 1e-4.\n max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.\n min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.\n \"\"\"\n super().__init__()\n self.p_eps = p_eps\n self.max_temp = max_temp\n self.min_temp = min_temp\n self.log_binomial_transform = LogBinomial(n_classes, act=act)\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(\n nn.Conv2d(in_features + condition_dim, bottleneck,\n kernel_size=1, stride=1, padding=0),\n nn.GELU(),\n # 2 for p linear norm, 2 for t linear norm\n nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),\n nn.Softplus()\n )\n\n def forward(self, x, cond):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Main feature\n cond (torch.Tensor - NCHW): condition feature\n\n Returns:\n torch.Tensor: Output log binomial distribution\n \"\"\"\n pt = self.mlp(torch.concat((x, cond), dim=1))\n p, t = pt[:, :2, ...], pt[:, 2:, ...]\n\n p = p + self.p_eps\n p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])\n\n t = t + self.p_eps\n t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])\n t = t.unsqueeze(1)\n t = (self.max_temp - self.min_temp) * t + self.min_temp\n\n return self.log_binomial_transform(p, t)"
},
{
"identifier": "Projector",
"path": "depth/models_depth/localbins_layers.py",
"snippet": "class Projector(nn.Module):\n def __init__(self, in_features, out_features, mlp_dim=128):\n \"\"\"Projector MLP\n\n Args:\n in_features (int): input channels\n out_features (int): output channels\n mlp_dim (int, optional): hidden dimension. Defaults to 128.\n \"\"\"\n super().__init__()\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, out_features, 1, 1, 0),\n )\n\n def forward(self, x):\n return self._net(x)"
},
{
"identifier": "SeedBinRegressor",
"path": "depth/models_depth/localbins_layers.py",
"snippet": "class SeedBinRegressor(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Min depth value. Defaults to 1e-3.\n max_depth (float, optional): Max depth value. Defaults to 10.\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self.min_depth = min_depth\n self.max_depth = max_depth\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B = self._net(x)\n eps = 1e-3\n B = B + eps\n B_widths_normed = B / B.sum(dim=1, keepdim=True)\n B_widths = (self.max_depth - self.min_depth) * \\\n B_widths_normed # .shape NCHW\n # pad has the form (left, right, top, bottom, front, back)\n B_widths = nn.functional.pad(\n B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)\n B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW\n\n B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])\n return B_widths_normed, B_centers"
},
{
"identifier": "SeedBinRegressorUnnormed",
"path": "depth/models_depth/localbins_layers.py",
"snippet": "class SeedBinRegressorUnnormed(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are unbounded\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B_centers = self._net(x)\n return B_centers, B_centers"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from timm.models.layers import trunc_normal_, DropPath
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer,
constant_init, normal_init)
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from evp.models import UNetWrapper, TextAdapterRefer, FrozenCLIPEmbedder
from .miniViT import mViT
from .attractor import AttractorLayer, AttractorLayerUnnormed
from .dist_layers import ConditionalLogBinomial
from .localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed) | 7,819 | if dataset == 'kitti':
self.unet = UNetWrapper(sd_model.model, use_attn=True, base_size=384)
del sd_model.cond_stage_model
del self.encoder_vq.decoder
del self.unet.unet.diffusion_model.out
del self.encoder_vq.post_quant_conv.weight
del self.encoder_vq.post_quant_conv.bias
for param in self.encoder_vq.parameters():
param.requires_grad = True
self.text_adapter = TextAdapterRefer(text_dim=text_dim)
self.gamma = nn.Parameter(torch.ones(text_dim) * 1e-4)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if caption_aggregation:
class_embeddings = torch.load(f'{dataset}_class_embeddings_my_captions.pth', map_location=device)
#class_embeddings_list = [value['class_embeddings'] for key, value in class_embeddings.items()]
#stacked_embeddings = torch.stack(class_embeddings_list, dim=0)
#class_embeddings = torch.mean(stacked_embeddings, dim=0).unsqueeze(0)
if 'aggregated' in class_embeddings:
class_embeddings = class_embeddings['aggregated']
else:
clip_model = FrozenCLIPEmbedder(max_length=40,pool=False).to(device)
class_embeddings_new = [clip_model.encode(value['caption'][0]) for key, value in class_embeddings.items()]
class_embeddings_new = torch.mean(torch.stack(class_embeddings_new, dim=0), dim=0)
class_embeddings['aggregated'] = class_embeddings_new
torch.save(class_embeddings, f'{dataset}_class_embeddings_my_captions.pth')
class_embeddings = class_embeddings['aggregated']
self.register_buffer('class_embeddings', class_embeddings)
else:
self.class_embeddings = torch.load(f'{dataset}_class_embeddings_my_captions.pth', map_location=device)
self.clip_model = FrozenCLIPEmbedder(max_length=40,pool=False)
for param in self.clip_model.parameters():
param.requires_grad = True
#if dataset == 'kitti':
# self.text_adapter_ = TextAdapterRefer(text_dim=text_dim)
# self.gamma_ = nn.Parameter(torch.ones(text_dim) * 1e-4)
self.caption_aggregation = caption_aggregation
self.dataset = dataset
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
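    # Note: forward_features references self.ldm_to_net, self.layers, self.upsample_layers and
    # self.out_conv, none of which are defined in __init__; it appears to be unused here, with
    # forward() below being the code path actually exercised.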
def forward_features(self, feats):
x = self.ldm_to_net[0](feats[0])
for i in range(3):
if i > 0:
x = x + self.ldm_to_net[i](feats[i])
x = self.layers[i](x)
x = self.upsample_layers[i](x)
return self.out_conv(x)
def forward(self, x, class_ids=None, img_paths=None):
latents = self.encoder_vq.encode(x).mode()
        # normalize the latents by a precomputed, dataset-specific standard deviation
if self.dataset == 'nyu':
latents = latents / 5.07543
elif self.dataset == 'kitti':
latents = latents / 4.6211
else:
print('Please calculate the STD for the dataset!')
if class_ids is not None:
if self.caption_aggregation:
class_embeddings = self.class_embeddings[[0]*len(class_ids.tolist())]#[class_ids.tolist()]
else:
class_embeddings = []
for img_path in img_paths:
class_embeddings.extend([value['caption'][0] for key, value in self.class_embeddings.items() if key in img_path.replace('//', '/')])
class_embeddings = self.clip_model.encode(class_embeddings)
else:
class_embeddings = self.class_embeddings
c_crossattn = self.text_adapter(latents, class_embeddings, self.gamma)
t = torch.ones((x.shape[0],), device=x.device).long()
#if self.dataset == 'kitti':
# c_crossattn_last = self.text_adapter_(latents, class_embeddings, self.gamma_)
# outs = self.unet(latents, t, c_crossattn=[c_crossattn, c_crossattn_last])
#else:
outs = self.unet(latents, t, c_crossattn=[c_crossattn])
outs = self.aggregation(outs)
feats = [outs[0], outs[1], torch.cat([outs[2], F.interpolate(outs[3], scale_factor=2)], dim=1)]
x = torch.cat([self.layer1(feats[0]), self.layer2(feats[1]), feats[2]], dim=1)
return self.out_layer(x)
def get_latent(self, x):
return self.encoder_vq.encode(x).mode()
class EVPDepth(nn.Module):
def __init__(self, args=None, caption_aggregation=False):
super().__init__()
self.max_depth = args.max_depth
self.min_depth = args.min_depth_eval
embed_dim = 192
channels_in = embed_dim*8
channels_out = embed_dim
if args.dataset == 'nyudepthv2':
self.encoder = EVPDepthEncoder(out_dim=channels_in, dataset='nyu', caption_aggregation=caption_aggregation)
else:
self.encoder = EVPDepthEncoder(out_dim=channels_in, dataset='kitti', caption_aggregation=caption_aggregation)
self.decoder = Decoder(channels_in, channels_out, args)
self.decoder.init_weights()
| # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The deconvolution code is based on Simple Baseline.
# (https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/models/pose_resnet.py)
# Modified by Zigang Geng ([email protected]).
# ------------------------------------------------------------------------------
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
"""
Checkerboard artifact free sub-pixel convolution
https://arxiv.org/abs/1707.02937
"""
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
x.data.copy_(k)
class PixelShuffle(nn.Module):
"""
Real-Time Single Image and Video Super-Resolution
https://arxiv.org/abs/1609.05158
"""
def __init__(self, n_channels, scale):
super(PixelShuffle, self).__init__()
self.conv = nn.Conv2d(n_channels, n_channels*(scale**2), kernel_size=1)
icnr(self.conv.weight)
self.shuf = nn.PixelShuffle(scale)
self.relu = nn.ReLU()
def forward(self,x):
x = self.shuf(self.relu(self.conv(x)))
return x
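# Illustrative shape check (hypothetical tensor): the ICNR-initialised 1x1 conv expands the
# channels by scale**2 and nn.PixelShuffle folds them back into space, so the block doubles
# H and W while keeping the channel count:
#   x = torch.randn(1, 64, 32, 32)
#   PixelShuffle(64, scale=2)(x).shape  # -> torch.Size([1, 64, 64, 64])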
class AttentionModule(nn.Module):
def __init__(self, in_channels, out_channels):
super(AttentionModule, self).__init__()
# Convolutional Layers
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU()
# Spatial Attention
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply convolutional layer
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
return x
class AttentionDownsamplingModule(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=2):
super(AttentionDownsamplingModule, self).__init__()
# Spatial Attention
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
# Channel Attention
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // 8, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels // 8, in_channels, kernel_size=1),
nn.Sigmoid()
)
# Convolutional Layers
if scale_factor == 2:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
elif scale_factor == 4:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply channel attention
channel_attention = self.channel_attention(x)
x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
return x
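# Note: with scale_factor=2 this block halves H and W (conv1 stride 1, conv2 stride 2); with
# scale_factor=4 it reduces them 4x (both convs stride 2). Spatial and channel attention
# re-weight the input before the strided convolutions.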
class AttentionUpsamplingModule(nn.Module):
def __init__(self, in_channels, out_channels):
super(AttentionUpsamplingModule, self).__init__()
        # Spatial Attention (applied to the input feature map)
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
        # Channel Attention (applied to the input feature map)
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // 8, kernel_size=1),
nn.ReLU(),
nn.Conv2d(in_channels // 8, in_channels, kernel_size=1),
nn.Sigmoid()
)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU()
self.upscale = PixelShuffle(in_channels, 2)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply channel attention
channel_attention = self.channel_attention(x)
x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
# Upsample
x = self.upscale(x)
return x
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvLayer, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.GroupNorm(20, out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.conv1(x)
return x
class InverseMultiAttentiveFeatureRefinement(nn.Module):
def __init__(self, in_channels_list):
super(InverseMultiAttentiveFeatureRefinement, self).__init__()
self.layer1 = AttentionModule(in_channels_list[0], in_channels_list[0])
self.layer2 = AttentionDownsamplingModule(in_channels_list[0], in_channels_list[0]//2, scale_factor = 2)
self.layer3 = ConvLayer(in_channels_list[0]//2 + in_channels_list[1], in_channels_list[1])
self.layer4 = AttentionDownsamplingModule(in_channels_list[1], in_channels_list[1]//2, scale_factor = 2)
self.layer5 = ConvLayer(in_channels_list[1]//2 + in_channels_list[2], in_channels_list[2])
self.layer6 = AttentionDownsamplingModule(in_channels_list[2], in_channels_list[2]//2, scale_factor = 2)
self.layer7 = ConvLayer(in_channels_list[2]//2 + in_channels_list[3], in_channels_list[3])
'''
self.layer8 = AttentionUpsamplingModule(in_channels_list[3], in_channels_list[3])
self.layer9 = ConvLayer(in_channels_list[2] + in_channels_list[3], in_channels_list[2])
self.layer10 = AttentionUpsamplingModule(in_channels_list[2], in_channels_list[2])
self.layer11 = ConvLayer(in_channels_list[1] + in_channels_list[2], in_channels_list[1])
self.layer12 = AttentionUpsamplingModule(in_channels_list[1], in_channels_list[1])
self.layer13 = ConvLayer(in_channels_list[0] + in_channels_list[1], in_channels_list[0])
'''
def forward(self, inputs):
x_c4, x_c3, x_c2, x_c1 = inputs
x_c4 = self.layer1(x_c4)
x_c4_3 = self.layer2(x_c4)
x_c3 = torch.cat([x_c4_3, x_c3], dim=1)
x_c3 = self.layer3(x_c3)
x_c3_2 = self.layer4(x_c3)
x_c2 = torch.cat([x_c3_2, x_c2], dim=1)
x_c2 = self.layer5(x_c2)
x_c2_1 = self.layer6(x_c2)
x_c1 = torch.cat([x_c2_1, x_c1], dim=1)
x_c1 = self.layer7(x_c1)
'''
x_c1_2 = self.layer8(x_c1)
x_c2 = torch.cat([x_c1_2, x_c2], dim=1)
x_c2 = self.layer9(x_c2)
x_c2_3 = self.layer10(x_c2)
x_c3 = torch.cat([x_c2_3, x_c3], dim=1)
x_c3 = self.layer11(x_c3)
x_c3_4 = self.layer12(x_c3)
x_c4 = torch.cat([x_c3_4, x_c4], dim=1)
x_c4 = self.layer13(x_c4)
'''
return [x_c4, x_c3, x_c2, x_c1]
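# Note: each map is attention-refined, downsampled and concatenated with the next map in the
# list, then fused by a 1x1 ConvLayer, so information flows from x_c4 towards x_c1; the
# commented-out layers 8-13 would add the reverse (upsampling) pass.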
class EVPDepthEncoder(nn.Module):
def __init__(self, out_dim=1024, ldm_prior=[320, 680, 1320+1280], sd_path=None, text_dim=768,
dataset='nyu', caption_aggregation=False
):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
nn.GroupNorm(16, ldm_prior[0]),
nn.ReLU(),
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
)
self.layer2 = nn.Sequential(
nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1),
)
self.out_layer = nn.Sequential(
nn.Conv2d(sum(ldm_prior), out_dim, 1),
nn.GroupNorm(16, out_dim),
nn.ReLU(),
)
self.aggregation = InverseMultiAttentiveFeatureRefinement([320, 680, 1320, 1280])
self.apply(self._init_weights)
### stable diffusion layers
config = OmegaConf.load('./v1-inference.yaml')
if sd_path is None:
if os.path.exists('../checkpoints/v1-5-pruned-emaonly.ckpt'):
config.model.params.ckpt_path = '../checkpoints/v1-5-pruned-emaonly.ckpt'
else:
config.model.params.ckpt_path = None
else:
config.model.params.ckpt_path = f'../{sd_path}'
sd_model = instantiate_from_config(config.model)
self.encoder_vq = sd_model.first_stage_model
self.unet = UNetWrapper(sd_model.model, use_attn=True)
if dataset == 'kitti':
self.unet = UNetWrapper(sd_model.model, use_attn=True, base_size=384)
del sd_model.cond_stage_model
del self.encoder_vq.decoder
del self.unet.unet.diffusion_model.out
del self.encoder_vq.post_quant_conv.weight
del self.encoder_vq.post_quant_conv.bias
for param in self.encoder_vq.parameters():
param.requires_grad = True
self.text_adapter = TextAdapterRefer(text_dim=text_dim)
self.gamma = nn.Parameter(torch.ones(text_dim) * 1e-4)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if caption_aggregation:
class_embeddings = torch.load(f'{dataset}_class_embeddings_my_captions.pth', map_location=device)
#class_embeddings_list = [value['class_embeddings'] for key, value in class_embeddings.items()]
#stacked_embeddings = torch.stack(class_embeddings_list, dim=0)
#class_embeddings = torch.mean(stacked_embeddings, dim=0).unsqueeze(0)
if 'aggregated' in class_embeddings:
class_embeddings = class_embeddings['aggregated']
else:
clip_model = FrozenCLIPEmbedder(max_length=40,pool=False).to(device)
class_embeddings_new = [clip_model.encode(value['caption'][0]) for key, value in class_embeddings.items()]
class_embeddings_new = torch.mean(torch.stack(class_embeddings_new, dim=0), dim=0)
class_embeddings['aggregated'] = class_embeddings_new
torch.save(class_embeddings, f'{dataset}_class_embeddings_my_captions.pth')
class_embeddings = class_embeddings['aggregated']
self.register_buffer('class_embeddings', class_embeddings)
else:
self.class_embeddings = torch.load(f'{dataset}_class_embeddings_my_captions.pth', map_location=device)
self.clip_model = FrozenCLIPEmbedder(max_length=40,pool=False)
for param in self.clip_model.parameters():
param.requires_grad = True
#if dataset == 'kitti':
# self.text_adapter_ = TextAdapterRefer(text_dim=text_dim)
# self.gamma_ = nn.Parameter(torch.ones(text_dim) * 1e-4)
self.caption_aggregation = caption_aggregation
self.dataset = dataset
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
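    # Note: forward_features references self.ldm_to_net, self.layers, self.upsample_layers and
    # self.out_conv, none of which are defined in __init__; it appears to be unused here, with
    # forward() below being the code path actually exercised.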
def forward_features(self, feats):
x = self.ldm_to_net[0](feats[0])
for i in range(3):
if i > 0:
x = x + self.ldm_to_net[i](feats[i])
x = self.layers[i](x)
x = self.upsample_layers[i](x)
return self.out_conv(x)
def forward(self, x, class_ids=None, img_paths=None):
latents = self.encoder_vq.encode(x).mode()
        # normalize the latents by a precomputed, dataset-specific standard deviation
if self.dataset == 'nyu':
latents = latents / 5.07543
elif self.dataset == 'kitti':
latents = latents / 4.6211
else:
print('Please calculate the STD for the dataset!')
if class_ids is not None:
if self.caption_aggregation:
class_embeddings = self.class_embeddings[[0]*len(class_ids.tolist())]#[class_ids.tolist()]
else:
class_embeddings = []
for img_path in img_paths:
class_embeddings.extend([value['caption'][0] for key, value in self.class_embeddings.items() if key in img_path.replace('//', '/')])
class_embeddings = self.clip_model.encode(class_embeddings)
else:
class_embeddings = self.class_embeddings
c_crossattn = self.text_adapter(latents, class_embeddings, self.gamma)
t = torch.ones((x.shape[0],), device=x.device).long()
#if self.dataset == 'kitti':
# c_crossattn_last = self.text_adapter_(latents, class_embeddings, self.gamma_)
# outs = self.unet(latents, t, c_crossattn=[c_crossattn, c_crossattn_last])
#else:
outs = self.unet(latents, t, c_crossattn=[c_crossattn])
outs = self.aggregation(outs)
feats = [outs[0], outs[1], torch.cat([outs[2], F.interpolate(outs[3], scale_factor=2)], dim=1)]
x = torch.cat([self.layer1(feats[0]), self.layer2(feats[1]), feats[2]], dim=1)
return self.out_layer(x)
def get_latent(self, x):
return self.encoder_vq.encode(x).mode()
class EVPDepth(nn.Module):
def __init__(self, args=None, caption_aggregation=False):
super().__init__()
self.max_depth = args.max_depth
self.min_depth = args.min_depth_eval
embed_dim = 192
channels_in = embed_dim*8
channels_out = embed_dim
if args.dataset == 'nyudepthv2':
self.encoder = EVPDepthEncoder(out_dim=channels_in, dataset='nyu', caption_aggregation=caption_aggregation)
else:
self.encoder = EVPDepthEncoder(out_dim=channels_in, dataset='kitti', caption_aggregation=caption_aggregation)
self.decoder = Decoder(channels_in, channels_out, args)
self.decoder.init_weights() | self.mViT = False | 3 | 2023-12-15 14:13:59+00:00 | 12k |
penghao-wu/vstar | VisualSearch/model/VSM.py | [
{
"identifier": "LlavaLlamaForCausalLM",
"path": "VisualSearch/model/llava/model/language_model/llava_llama.py",
"snippet": "class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):\n config_class = LlavaConfig\n\n def __init__(self, config):\n super(LlamaForCausalLM, self).__init__(config)\n\n self.model = LlavaLlamaModel(config)\n\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_model(self):\n return self.model\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[torch.FloatTensor] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n (\n input_ids,\n attention_mask,\n past_key_values,\n inputs_embeds,\n labels,\n ) = self.prepare_inputs_labels_for_multimodal(\n input_ids, attention_mask, past_key_values, labels, images\n )\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model/pipeline parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n if self.training:\n output_hidden_states = outputs.hidden_states\n else:\n output_hidden_states = hidden_states\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=output_hidden_states, # outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n images=None,\n **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n \"images\": images,\n }\n )\n return model_inputs"
},
{
"identifier": "LlavaLlamaModel",
"path": "VisualSearch/model/llava/model/language_model/llava_llama.py",
"snippet": "class LlavaLlamaModel(LlavaMetaModel, LlamaModel):\n config_class = LlavaConfig\n\n def __init__(self, config: LlamaConfig):\n super(LlavaLlamaModel, self).__init__(config)"
},
{
"identifier": "MaskDecoder",
"path": "VisualSearch/model/segment_anything/modeling/mask_decoder.py",
"snippet": "class MaskDecoder(nn.Module):\n def __init__(\n self,\n *,\n transformer_dim: int,\n transformer: nn.Module,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a\n transformer architecture.\n\n Arguments:\n transformer_dim (int): the channel dimension of the transformer\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict\n when disambiguating masks\n activation (nn.Module): the type of activation to use when\n upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict\n mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP\n used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n self.transformer = transformer\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n # self.output_upscaling = nn.Sequential(\n # nn.ConvTranspose2d(\n # transformer_dim, transformer_dim // 4, kernel_size=2, stride=2\n # ),\n # LayerNorm2d(transformer_dim // 4),\n # activation(),\n # nn.ConvTranspose2d(\n # transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2\n # ),\n # activation(),\n # )\n\n self.output_upscaling = nn.Sequential(\n Upsample(transformer_dim, transformer_dim // 4),\n LayerNorm2d(transformer_dim // 4),\n activation(),\n Upsample(transformer_dim // 4, transformer_dim // 8),\n activation(),\n )\n\n self.output_hypernetworks_mlps = nn.ModuleList(\n [\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n for i in range(self.num_mask_tokens)\n ]\n )\n\n self.iou_prediction_head = MLP(\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n )\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n multimask_output: bool,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Arguments:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single\n mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n if multimask_output:\n mask_slice = slice(1, None)\n else:\n mask_slice = slice(0, 1)\n masks = masks[:, mask_slice, :, :]\n iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat(\n [self.iou_token.weight, self.mask_tokens.weight], dim=0\n )\n output_tokens = output_tokens.unsqueeze(0).expand(\n sparse_prompt_embeddings.size(0), -1, -1\n )\n\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # image_embeddings: [1, C, H, W], tokens: [B, N, C]\n # dense_prompt_embeddings: [B, C, H, W]\n # Expand per-image data in batch direction to be per-mask\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n src = src + dense_prompt_embeddings\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n b, c, h, w = src.shape\n\n # Run the transformer\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, h, w)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = []\n for i in range(self.num_mask_tokens):\n hyper_in_list.append(\n self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])\n )\n hyper_in = torch.stack(hyper_in_list, dim=1)\n b, c, h, w = upscaled_embedding.shape\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(\n b, self.num_mask_tokens, h, w\n )\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "PromptEncoder",
"path": "VisualSearch/model/segment_anything/modeling/prompt_encoder.py",
"snippet": "class PromptEncoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int],\n input_image_size: Tuple[int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Arguments:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\n point_embeddings = [\n nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)\n ]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (\n 4 * image_embedding_size[0],\n 4 * image_embedding_size[1],\n )\n self.mask_downscaling = nn.Sequential(\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans // 4),\n activation(),\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans),\n activation(),\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n )\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts,\n applied to a dense set of points the shape of the image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape\n 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(\n points, self.input_image_size\n )\n point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(\n coords, self.input_image_size\n )\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n mask_embedding = self.mask_downscaling(masks)\n return mask_embedding\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, 
torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n text_embeds: Optional[torch.Tensor],\n ) -> int:\n \"\"\"\n Gets the batch size of the output given the batch size of the input prompts.\n \"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n elif text_embeds is not None:\n return text_embeds.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n text_embeds: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense\n embeddings.\n\n Arguments:\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n and labels to embed.\n boxes (torch.Tensor or none): boxes to embed\n masks (torch.Tensor or none): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape\n BxNx(embed_dim), where N is determined by the number of input points\n and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape\n Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks, text_embeds)\n sparse_embeddings = torch.empty(\n (bs, 0, self.embed_dim), device=self._get_device()\n )\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if text_embeds is not None:\n sparse_embeddings = torch.cat([sparse_embeddings, text_embeds], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\n )\n\n return sparse_embeddings, dense_embeddings"
},
{
"identifier": "TwoWayTransformer",
"path": "VisualSearch/model/segment_anything/modeling/transformer.py",
"snippet": "class TwoWayTransformer(nn.Module):\n def __init__(\n self,\n depth: int,\n embedding_dim: int,\n num_heads: int,\n mlp_dim: int,\n activation: Type[nn.Module] = nn.ReLU,\n attention_downsample_rate: int = 2,\n ) -> None:\n \"\"\"\n A transformer decoder that attends to an input image using\n queries whose positional embedding is supplied.\n\n Args:\n depth (int): number of layers in the transformer\n embedding_dim (int): the channel dimension for the input embeddings\n num_heads (int): the number of heads for multihead attention. Must\n divide embedding_dim\n mlp_dim (int): the channel dimension internal to the MLP block\n activation (nn.Module): the activation to use in the MLP block\n \"\"\"\n super().__init__()\n self.depth = depth\n self.embedding_dim = embedding_dim\n self.num_heads = num_heads\n self.mlp_dim = mlp_dim\n self.layers = nn.ModuleList()\n\n for i in range(depth):\n self.layers.append(\n TwoWayAttentionBlock(\n embedding_dim=embedding_dim,\n num_heads=num_heads,\n mlp_dim=mlp_dim,\n activation=activation,\n attention_downsample_rate=attention_downsample_rate,\n skip_first_layer_pe=(i == 0),\n )\n )\n\n self.final_attn_token_to_image = Attention(\n embedding_dim, num_heads, downsample_rate=attention_downsample_rate\n )\n self.norm_final_attn = nn.LayerNorm(embedding_dim)\n\n def forward(\n self,\n image_embedding: Tensor,\n image_pe: Tensor,\n point_embedding: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Args:\n image_embedding (torch.Tensor): image to attend to. Should be shape\n B x embedding_dim x h x w for any h and w.\n image_pe (torch.Tensor): the positional encoding to add to the image. Must\n have the same shape as image_embedding.\n point_embedding (torch.Tensor): the embedding to add to the query points.\n Must have shape B x N_points x embedding_dim for any N_points.\n\n Returns:\n torch.Tensor: the processed point_embedding\n torch.Tensor: the processed image_embedding\n \"\"\"\n # BxCxHxW -> BxHWxC == B x N_image_tokens x C\n bs, c, h, w = image_embedding.shape\n image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\n image_pe = image_pe.flatten(2).permute(0, 2, 1)\n\n # Prepare queries\n queries = point_embedding\n keys = image_embedding\n\n # Apply transformer blocks and final layernorm\n for layer in self.layers:\n queries, keys = layer(\n queries=queries,\n keys=keys,\n query_pe=point_embedding,\n key_pe=image_pe,\n )\n\n # Apply the final attention layer from the points to the image\n q = queries + point_embedding\n k = keys + image_pe\n attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\n queries = queries + attn_out\n queries = self.norm_final_attn(queries)\n\n return queries, keys"
},
{
"identifier": "OwlViT",
"path": "VisualSearch/model/owlvit/owlvit.py",
"snippet": "class OwlViT(nn.Module):\n def __init__(self, num_classes, is_eval=False):\n super().__init__()\n if is_eval:\n owlViT_config = OwlViTConfig.from_pretrained(\"google/owlvit-base-patch16\")\n model_owlViT = OwlViTForObjectDetection(owlViT_config)\n else:\n model_owlViT = OwlViTForObjectDetection.from_pretrained(\"google/owlvit-base-patch16\")\n self.vision_model = model_owlViT.owlvit.vision_model\n self.class_head = model_owlViT.class_head\n self.box_head = model_owlViT.box_head\n self.layer_norm = model_owlViT.layer_norm\n self.sigmoid = nn.Sigmoid()\n del model_owlViT\n\n self.matcher = HungarianMatcher(cost_class=2, cost_bbox=5, cost_giou=2)\n self.weight_dict = {'loss_ce': 2, 'loss_bbox': 5, 'loss_giou': 2}\n\n self.losses = ['labels', 'boxes']\n # num_classes, matcher, weight_dict, losses, focal_alpha=0.25\n self.criterion = SetCriterion(num_classes, self.matcher, self.weight_dict, self.losses, focal_alpha=0.25)\n\n def normalize_grid_corner_coordinates(self, feature_map: torch.FloatTensor):\n # Computes normalized xy corner coordinates from feature_map.\n if not feature_map.ndim == 4:\n raise ValueError(\"Expected input shape is [batch_size, num_patches, num_patches, hidden_dim]\")\n\n device = feature_map.device\n num_patches = feature_map.shape[1]\n\n box_coordinates = np.stack(\n np.meshgrid(np.arange(1, num_patches + 1), np.arange(1, num_patches + 1)), axis=-1\n ).astype(np.float32)\n box_coordinates /= np.array([num_patches, num_patches], np.float32)\n\n # Flatten (h, w, 2) -> (h*w, 2)\n box_coordinates = box_coordinates.reshape(\n box_coordinates.shape[0] * box_coordinates.shape[1], box_coordinates.shape[2]\n )\n box_coordinates = torch.from_numpy(box_coordinates).to(device)\n\n return box_coordinates\n\n def compute_box_bias(self, feature_map: torch.FloatTensor) -> torch.FloatTensor:\n # The box center is biased to its position on the feature grid\n box_coordinates = self.normalize_grid_corner_coordinates(feature_map)\n box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)\n\n # Unnormalize xy\n box_coord_bias = torch.log(box_coordinates + 1e-4) - torch.log1p(-box_coordinates + 1e-4)\n\n # The box size is biased to the patch size\n box_size = torch.full_like(box_coord_bias, 1.0 / feature_map.shape[-2])\n box_size_bias = torch.log(box_size + 1e-4) - torch.log1p(-box_size + 1e-4)\n\n # Compute box bias\n box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)\n return box_bias\n\n def box_predictor(\n self,\n image_feats: torch.FloatTensor,\n feature_map: torch.FloatTensor,\n ) -> torch.FloatTensor:\n \"\"\"\n Args:\n image_feats:\n Features extracted from the image, returned by the `image_text_embedder` method.\n feature_map:\n A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.\n Returns:\n pred_boxes:\n List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.\n \"\"\"\n # Bounding box detection head [batch_size, num_boxes, 4].\n pred_boxes = self.box_head(image_feats)\n\n # Compute the location of each token on the grid and use it to compute a bias for the bbox prediction\n pred_boxes += self.compute_box_bias(feature_map)\n pred_boxes = self.sigmoid(pred_boxes)\n return pred_boxes\n\n def class_predictor(\n self,\n image_feats: torch.FloatTensor,\n query_embeds: Optional[torch.FloatTensor] = None,\n query_mask: Optional[torch.Tensor] = None,\n ) -> Tuple[torch.FloatTensor]:\n \"\"\"\n Args:\n image_feats:\n Features extracted from the `image_text_embedder`.\n query_embeds:\n Text query 
embeddings.\n query_mask:\n Must be provided with query_embeddings. A mask indicating which query embeddings are valid.\n \"\"\"\n (pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask)\n\n return (pred_logits, image_class_embeds)\n\n def get_visual_embs(self, image):\n vision_outputs = self.vision_model(\n pixel_values=image,\n output_hidden_states=self.vision_model.config.output_hidden_states,\n return_dict=self.vision_model.config.use_return_dict,\n )\n\n # Get image embeddings\n last_hidden_state = vision_outputs[0]\n image_embeds = self.vision_model.post_layernorm(last_hidden_state)\n\n # Resize class token\n new_size = tuple(np.array(image_embeds.shape) - np.array((0, 1, 0)))\n class_token_out = torch.broadcast_to(image_embeds[:, :1, :], new_size)\n\n # Merge image embedding with class tokens\n image_embeds = image_embeds[:, 1:, :] * class_token_out\n image_embeds = self.layer_norm(image_embeds)\n\n # Resize to [batch_size, num_patches, num_patches, hidden_size]\n new_size = (\n image_embeds.shape[0],\n int(np.sqrt(image_embeds.shape[1])),\n int(np.sqrt(image_embeds.shape[1])),\n image_embeds.shape[-1],\n )\n feature_map = image_embeds.reshape(new_size)\n return feature_map\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n prompt_embeddings: torch.Tensor,\n ):\n\n feature_map = image_embeddings\n\n batch_size, num_patches, num_patches, hidden_dim = feature_map.shape\n image_feats = torch.reshape(feature_map, (batch_size, num_patches * num_patches, hidden_dim))\n\n query_embeds = prompt_embeddings.reshape(batch_size, 1, prompt_embeddings.shape[-1])\n\n # Predict object classes [batch_size, num_patches, num_queries+1]\n (pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds)\n\n # Predict object boxes\n pred_boxes = self.box_predictor(image_feats, feature_map)\n\n out = {'pred_logits': pred_logits, 'pred_boxes': pred_boxes}\n return out"
}
] | from typing import List
from VisualSearch.model.llava.model.language_model.llava_llama import (LlavaLlamaForCausalLM,
LlavaLlamaModel)
from .segment_anything.modeling import PromptEncoder, MaskDecoder, TwoWayTransformer
from .owlvit.owlvit import OwlViT
import torch
import torch.nn as nn
import torch.nn.functional as F | 8,069 | num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
loss = loss.flatten(1, 2).mean(1) / (num_masks + 1e-8)
return loss
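# Mixin that attaches the grounding modules (OWL-ViT detector, SAM-style prompt encoder and mask decoder, plus text projection heads) to the LLaVA model.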
class VSMMetaModel:
def __init__(
self,
config,
**kwargs,
):
super(VSMMetaModel, self).__init__(config)
self.config = config
if not hasattr(self.config, "train_mask_decoder"):
self.config.train_mask_decoder = kwargs["train_mask_decoder"]
self.config.out_dim = kwargs["out_dim"]
else:
is_eval = kwargs.get('is_eval', False)
self.initialize_lisa_modules(self.config, is_eval)
def initialize_lisa_modules(self, config, is_eval=False):
# OWL-ViT
self.owlvit = OwlViT(1, is_eval)
self.owlvit.train()
for param in self.owlvit.parameters():
param.requires_grad = True
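# Keep the OWL-ViT vision backbone and box head frozen; the remaining detector parameters stay trainable.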
for param in self.owlvit.vision_model.parameters():
param.requires_grad = False
self.owlvit.vision_model.eval()
for param in self.owlvit.box_head.parameters():
param.requires_grad = False
self.visual_projection = nn.Linear(self.owlvit.vision_model.config.hidden_size, 256, bias=False)
for param in self.visual_projection.parameters():
param.requires_grad = True
self.prompt_encoder=PromptEncoder(
embed_dim=256,
image_embedding_size=(48, 48),
input_image_size=(768, 768),
mask_in_chans=16,
)
self.prompt_encoder.train()
for param in self.prompt_encoder.parameters():
param.requires_grad = True
self.mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=256,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=256,
iou_head_depth=3,
iou_head_hidden_dim=256,
)
self.mask_decoder.train()
for param in self.mask_decoder.parameters():
param.requires_grad = True
# Projection layer
in_dim = config.hidden_size
out_dim = config.out_dim
text_fc_det = [
nn.Linear(in_dim, in_dim),
nn.ReLU(inplace=True),
nn.Linear(in_dim, out_dim),
nn.Dropout(0.0),
]
self.text_hidden_fcs_det = nn.ModuleList([nn.Sequential(*text_fc_det)])
self.text_hidden_fcs_det.train()
for param in self.text_hidden_fcs_det.parameters():
param.requires_grad = True
text_fc_seg = [
nn.Linear(in_dim, in_dim),
nn.ReLU(inplace=True),
nn.Linear(in_dim, 256),
nn.Dropout(0.0),
]
self.text_hidden_fcs_seg = nn.ModuleList([nn.Sequential(*text_fc_seg)])
self.text_hidden_fcs_seg.train()
for param in self.text_hidden_fcs_seg.parameters():
param.requires_grad = True
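# LLaVA language model that carries the modules above; the multimodal MLP adapter is kept frozen via the config flags set below.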
class VSMModel(VSMMetaModel, LlavaLlamaModel):
def __init__(
self,
config,
**kwargs,
):
super(VSMModel, self).__init__(config, **kwargs)
self.config.use_cache = False
self.config.vision_tower = self.config.mm_vision_tower
self.config.mm_vision_select_feature = "patch"
self.config.image_aspect_ratio = "square"
self.config.image_grid_pinpoints = None
self.config.tune_mm_mlp_adapter = False
self.config.freeze_mm_mlp_adapter = True
self.config.pretrain_mm_mlp_adapter = None
self.config.mm_use_im_patch_token = False
|
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
scale=1000, # 100000.0,
eps=1e-6,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1, 2)
targets = targets.flatten(1, 2)
numerator = 2 * (inputs / scale * targets).sum(-1)
denominator = (inputs / scale).sum(-1) + (targets / scale).sum(-1)
loss = 1 - (numerator + eps) / (denominator + eps)
loss = loss / (num_masks + 1e-8)
return loss
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
loss = loss.flatten(1, 2).mean(1) / (num_masks + 1e-8)
return loss
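# Mixin that attaches the grounding modules (OWL-ViT detector, SAM-style prompt encoder and mask decoder, plus text projection heads) to the LLaVA model.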
class VSMMetaModel:
def __init__(
self,
config,
**kwargs,
):
super(VSMMetaModel, self).__init__(config)
self.config = config
if not hasattr(self.config, "train_mask_decoder"):
self.config.train_mask_decoder = kwargs["train_mask_decoder"]
self.config.out_dim = kwargs["out_dim"]
else:
is_eval = kwargs.get('is_eval', False)
self.initialize_lisa_modules(self.config, is_eval)
def initialize_lisa_modules(self, config, is_eval=False):
# OWL-ViT
self.owlvit = OwlViT(1, is_eval)
self.owlvit.train()
for param in self.owlvit.parameters():
param.requires_grad = True
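# Keep the OWL-ViT vision backbone and box head frozen; the remaining detector parameters stay trainable.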
for param in self.owlvit.vision_model.parameters():
param.requires_grad = False
self.owlvit.vision_model.eval()
for param in self.owlvit.box_head.parameters():
param.requires_grad = False
self.visual_projection = nn.Linear(self.owlvit.vision_model.config.hidden_size, 256, bias=False)
for param in self.visual_projection.parameters():
param.requires_grad = True
self.prompt_encoder=PromptEncoder(
embed_dim=256,
image_embedding_size=(48, 48),
input_image_size=(768, 768),
mask_in_chans=16,
)
self.prompt_encoder.train()
for param in self.prompt_encoder.parameters():
param.requires_grad = True
self.mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=256,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=256,
iou_head_depth=3,
iou_head_hidden_dim=256,
)
self.mask_decoder.train()
for param in self.mask_decoder.parameters():
param.requires_grad = True
# Projection layer
in_dim = config.hidden_size
out_dim = config.out_dim
text_fc_det = [
nn.Linear(in_dim, in_dim),
nn.ReLU(inplace=True),
nn.Linear(in_dim, out_dim),
nn.Dropout(0.0),
]
self.text_hidden_fcs_det = nn.ModuleList([nn.Sequential(*text_fc_det)])
self.text_hidden_fcs_det.train()
for param in self.text_hidden_fcs_det.parameters():
param.requires_grad = True
text_fc_seg = [
nn.Linear(in_dim, in_dim),
nn.ReLU(inplace=True),
nn.Linear(in_dim, 256),
nn.Dropout(0.0),
]
self.text_hidden_fcs_seg = nn.ModuleList([nn.Sequential(*text_fc_seg)])
self.text_hidden_fcs_seg.train()
for param in self.text_hidden_fcs_seg.parameters():
param.requires_grad = True
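# LLaVA language model that carries the modules above; the multimodal MLP adapter is kept frozen via the config flags set below.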
class VSMModel(VSMMetaModel, LlavaLlamaModel):
def __init__(
self,
config,
**kwargs,
):
super(VSMModel, self).__init__(config, **kwargs)
self.config.use_cache = False
self.config.vision_tower = self.config.mm_vision_tower
self.config.mm_vision_select_feature = "patch"
self.config.image_aspect_ratio = "square"
self.config.image_grid_pinpoints = None
self.config.tune_mm_mlp_adapter = False
self.config.freeze_mm_mlp_adapter = True
self.config.pretrain_mm_mlp_adapter = None
self.config.mm_use_im_patch_token = False
| class VSMForCausalLM(LlavaLlamaForCausalLM): | 0 | 2023-12-15 14:58:24+00:00 | 12k |
worm128/AI-YinMei | text-generation-webui/extensions/Training_PRO/script.py | [
{
"identifier": "FPSchedulerTrainer",
"path": "text-generation-webui/extensions/Training_PRO/custom_scheduler.py",
"snippet": "class FPSchedulerTrainer(transformers.Trainer):\n def __init__(self,neftune_noise_alpha:float = 0.0, model = None, *args, **kwargs):\n self.neftune_noise_alpha = neftune_noise_alpha\n if self.neftune_noise_alpha > 0.0:\n model = self._activate_neftune(model)\n super().__init__(model = model, *args, **kwargs)\n\n \n def _activate_neftune(self, model):\n r\"\"\"\n Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914\n \"\"\"\n print(f\"Activating {RED}NEFtune{RESET} with scale: {self.neftune_noise_alpha}\")\n if isinstance(model, transformers.PreTrainedModel):\n embeddings = model.get_input_embeddings()\n elif isinstance(model, PeftModel):\n embeddings = model.base_model.get_input_embeddings()\n\n embeddings.neftune_noise_alpha = self.neftune_noise_alpha\n old_forward = embeddings.forward\n\n # This hack seems to be needed to properly use a custom forward pass\n # all credits to: https://discuss.pytorch.org/t/how-can-i-replace-the-forward-method-of-a-predefined-torchvision-model-with-my-customized-forward-function/54224/11\n bound_method = neftune_forward.__get__(embeddings, embeddings.__class__)\n setattr(embeddings, \"forward\", bound_method)\n\n # embeddings.forward = neftune_forward\n embeddings._trl_old_forward = old_forward\n\n return model\n \n def train(self, *args, **kwargs):\n output = super().train(*args, **kwargs)\n\n # After training we make sure to retrieve back the original forward pass method\n # for the embedding layer\n if self.neftune_noise_alpha is not None:\n\n if isinstance(self.model, transformers.PreTrainedModel):\n embeddings = self.model.get_input_embeddings()\n elif isinstance(self.model, PeftModel):\n embeddings = self.model.base_model.get_input_embeddings()\n\n if hasattr(embeddings, \"_trl_old_forward\"):\n embeddings.forward = embeddings._trl_old_forward\n del embeddings._trl_old_forward\n del embeddings.neftune_noise_alpha\n\n return output\n\n\n def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):\n #Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or passed as an argument.\n \n num_train_epochs = self.args.num_train_epochs\n num_warmup_steps=self.args.get_warmup_steps(num_training_steps)\n num_firstepoch_steps = math.ceil(num_training_steps/num_train_epochs)\n num_warmup_acc = num_warmup_steps*self.args.gradient_accumulation_steps \n num_firstepoch_steps_acc = num_firstepoch_steps*self.args.gradient_accumulation_steps\n num_training_steps_acc = num_training_steps*self.args.gradient_accumulation_steps\n\n custom_scheduler_params.update({'dynamic_scheduler_stop': False})\n \n print (f\"Warm-up steps aligned to Gradient accumulation ({self.args.gradient_accumulation_steps}) = {num_warmup_acc} actual warmup steps\")\n if self.args.lr_scheduler_type == 'cosine':\n \n num_warmup_acc_min = min(num_warmup_acc, num_firstepoch_steps_acc)\n\n if num_warmup_acc>num_firstepoch_steps_acc:\n print(f\"\\033[1;31;1mWARNING: The number of warmup steps is set too high! 
It will be clamped to 1 epoch, essentially going from warmup to annealing.\\033[0;37;0m\")\n print (f\"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}\")\n else:\n print (f\"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}\")\n\n self.lr_scheduler = custom_cosine_scheduler_with_warmup(\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps, \n num_firstepoch_steps = num_firstepoch_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler\n elif self.args.lr_scheduler_type == 'constant':\n \n half_step_acc = num_training_steps_acc//2\n num_warmup_acc_min = min(num_warmup_acc, half_step_acc)\n\n if num_warmup_acc>half_step_acc:\n print(f\"\\033[1;31;1mWARNING: The number of warmup steps is set too high! It will be clamped to half of all epochs, essentially going from warmup to annealing in the middle.\\033[0;37;0m\")\n print (f\"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}\")\n else:\n print (f\"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}\")\n\n self.lr_scheduler = custom_half_scheduler_with_warmup(\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps, \n num_firstepoch_steps = num_firstepoch_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler\n elif self.args.lr_scheduler_type == 'constant_with_warmup':\n \n half_step_acc = num_training_steps_acc//2\n \n if num_warmup_steps>0:\n print(f\"Warmup doesn't apply to this scheduler [Raise-Fall]\")\n\n print (f\"Scheduler Raise: 0-{half_step_acc}, Fall {half_step_acc}-{num_training_steps_acc}\")\n\n self.lr_scheduler = custom_raise_fall_scheduler_with_warmup(\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps, \n num_firstepoch_steps = num_firstepoch_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler \n else:\n return super().create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)"
},
{
"identifier": "FPNEFtuneTrainer",
"path": "text-generation-webui/extensions/Training_PRO/custom_scheduler.py",
"snippet": "class FPNEFtuneTrainer(transformers.Trainer):\n def __init__(self,neftune_noise_alpha:float = 0.0, model = None, *args, **kwargs):\n self.neftune_noise_alpha = neftune_noise_alpha\n if self.neftune_noise_alpha > 0.0:\n model = self._activate_neftune(model)\n super().__init__(model = model, *args, **kwargs)\n\n \n def _activate_neftune(self, model):\n r\"\"\"\n Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914\n \"\"\"\n print(f\"Activating {RED}NEFtune{RESET} with scale: {self.neftune_noise_alpha}\")\n if isinstance(model, transformers.PreTrainedModel):\n embeddings = model.get_input_embeddings()\n elif isinstance(model, PeftModel):\n embeddings = model.base_model.get_input_embeddings()\n\n embeddings.neftune_noise_alpha = self.neftune_noise_alpha\n old_forward = embeddings.forward\n\n # This hack seems to be needed to properly use a custom forward pass\n # all credits to: https://discuss.pytorch.org/t/how-can-i-replace-the-forward-method-of-a-predefined-torchvision-model-with-my-customized-forward-function/54224/11\n bound_method = neftune_forward.__get__(embeddings, embeddings.__class__)\n setattr(embeddings, \"forward\", bound_method)\n\n # embeddings.forward = neftune_forward\n embeddings._trl_old_forward = old_forward\n\n return model\n \n def train(self, *args, **kwargs):\n output = super().train(*args, **kwargs)\n\n # After training we make sure to retrieve back the original forward pass method\n # for the embedding layer\n if self.neftune_noise_alpha is not None:\n\n if isinstance(self.model, transformers.PreTrainedModel):\n embeddings = self.model.get_input_embeddings()\n elif isinstance(self.model, PeftModel):\n embeddings = self.model.base_model.get_input_embeddings()\n\n if hasattr(embeddings, \"_trl_old_forward\"):\n embeddings.forward = embeddings._trl_old_forward\n del embeddings._trl_old_forward\n del embeddings.neftune_noise_alpha\n\n return output"
},
{
"identifier": "create_graph",
"path": "text-generation-webui/extensions/Training_PRO/matplotgraph.py",
"snippet": "def create_graph(lora_path, lora_name):\n try:\n import matplotlib.pyplot as plt\n from matplotlib.ticker import ScalarFormatter\n \n peft_model_path = f'{lora_path}/training_graph.json'\n image_model_path = f'{lora_path}/training_graph.png'\n # Check if the JSON file exists\n if os.path.exists(peft_model_path):\n # Load data from JSON file\n with open(peft_model_path, 'r') as file:\n data = json.load(file)\n # Extract x, y1, and y2 values\n x = [item['epoch'] for item in data]\n y1 = [item['learning_rate'] for item in data]\n y2 = [item['loss'] for item in data]\n\n # Create the line chart\n fig, ax1 = plt.subplots(figsize=(10, 6))\n \n\n # Plot y1 (learning rate) on the first y-axis\n ax1.plot(x, y1, 'b-', label='Learning Rate')\n ax1.set_xlabel('Epoch')\n ax1.set_ylabel('Learning Rate', color='b')\n ax1.tick_params('y', colors='b')\n\n # Create a second y-axis\n ax2 = ax1.twinx()\n\n # Plot y2 (loss) on the second y-axis\n ax2.plot(x, y2, 'r-', label='Loss')\n ax2.set_ylabel('Loss', color='r')\n ax2.tick_params('y', colors='r')\n\n # Set the y-axis formatter to display numbers in scientific notation\n ax1.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))\n ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n\n # Add grid\n ax1.grid(True)\n\n # Combine the legends for both plots\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc='best')\n\n # Set the title\n plt.title(f'{lora_name} LR and Loss vs Epoch')\n\n # Save the chart as an image\n plt.savefig(image_model_path)\n\n print(f\"Graph saved in {image_model_path}\")\n else:\n print(f\"File 'training_graph.json' does not exist in the {lora_path}\")\n \n except ImportError:\n print(\"matplotlib is not installed. Please install matplotlib to create PNG graphs\")"
},
{
"identifier": "get_available_loras_local",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def get_available_loras_local(_sortedByTime):\n \n model_dir = shared.args.lora_dir # Update with the appropriate directory path\n subfolders = []\n if _sortedByTime:\n subfolders = list_subfoldersByTime(model_dir)\n else:\n subfolders = utils.get_available_loras() \n\n return subfolders"
},
{
"identifier": "precise_cut",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def precise_cut(text: str, overlap: bool, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer:bool):\n\n EOSX_str = '<//>' #hardcut placeholder\n EOS_str = '</s>' \n print(\"Precise raw text slicer: ON\")\n \n cut_string = hard_cut_string.replace('\\\\n', '\\n')\n text = text.replace(cut_string, EOSX_str)\n sentences = split_sentences(text, cutoff_len)\n\n print(f\"Sentences: {len(sentences)}\")\n sentencelist = []\n currentSentence = ''\n totalLength = 0\n max_cut = cutoff_len-1\n half_cut = cutoff_len//2\n halfcut_length = 0\n\n edgeindex = []\n half_index = 0\n\n for index, item in enumerate(sentences):\n \n if halfcut_length+ item['size'] < half_cut:\n halfcut_length += item['size']\n half_index = index\n else:\n edgeindex.append(half_index)\n halfcut_length = -2 * max_cut\n\n\n if totalLength + item['size'] < max_cut and not currentSentence.endswith(EOSX_str): \n currentSentence += item['text']\n totalLength += item['size']\n else:\n\n if len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip())\n\n currentSentence = item['text']\n totalLength = item['size']\n halfcut_length = item['size']\n \n if len(currentSentence.strip()) > min_chars_cut: \n sentencelist.append(currentSentence.strip())\n\n unique_blocks = len(sentencelist)\n print(f\"Text Blocks: {unique_blocks}\")\n\n #overlap strategies: \n # don't overlap across HARD CUT (EOSX)\n if overlap:\n for edge_idx in edgeindex:\n currentSentence = ''\n totalLength = 0\n\n for item in sentences[edge_idx:]:\n if totalLength + item['size'] < max_cut:\n currentSentence += item['text']\n totalLength += item['size']\n else:\n #if by chance EOSX is at the end then it's acceptable\n if currentSentence.endswith(EOSX_str) and len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip()) \n # otherwise don't cross hard cut \n elif EOSX_str not in currentSentence and len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip())\n \n currentSentence = ''\n totalLength = 0\n break\n \n print(f\"+ Overlapping blocks: {len(sentencelist)-unique_blocks}\")\n\n num_EOS = 0\n for i in range(len(sentencelist)):\n if eos_to_hc:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)\n else:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, '')\n \n #someone may have had stop strings in the raw text...\n sentencelist[i] = sentencelist[i].replace(\"</s></s>\", EOS_str)\n num_EOS += sentencelist[i].count(EOS_str)\n\n if num_EOS > 0:\n print(f\"+ EOS count: {num_EOS}\")\n\n #final check for useless lines\n sentencelist = [item for item in sentencelist if item.strip() != \"</s>\"]\n sentencelist = [item for item in sentencelist if item.strip() != \"\"]\n\n\n if debug_slicer:\n # Write the log file\n Path('logs').mkdir(exist_ok=True)\n sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}\n output_file = \"logs/sentencelist.json\"\n with open(output_file, 'w') as f:\n json.dump(sentencelist_dict, f,indent=2)\n \n print(\"Saved sentencelist.json in logs folder\")\n \n return sentencelist "
},
{
"identifier": "sliding_block_cut",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def sliding_block_cut(text: str, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer:bool):\n\n EOSX_str = '<//>' #hardcut placeholder\n EOS_str = '</s>' \n print(\"Mega Block Overlap: ON\")\n \n cut_string = hard_cut_string.replace('\\\\n', '\\n')\n text = text.replace(cut_string, EOSX_str)\n sentences = split_sentences(text, cutoff_len)\n\n print(f\"Sentences: {len(sentences)}\")\n sentencelist = []\n \n max_cut = cutoff_len-1\n\n #print(f\"max_cut: {max_cut}\")\n advancing_to = 0\n\n prev_block_lastsentence = \"\"\n \n\n for i in range(len(sentences)):\n totalLength = 0\n currentSentence = ''\n lastsentence = \"\"\n \n if i >= advancing_to:\n for k in range(i, len(sentences)):\n \n current_length = sentences[k]['size']\n\n if totalLength + current_length <= max_cut and not currentSentence.endswith(EOSX_str):\n currentSentence += sentences[k]['text']\n totalLength += current_length\n lastsentence = sentences[k]['text']\n else:\n if len(currentSentence.strip()) > min_chars_cut:\n if prev_block_lastsentence!=lastsentence:\n sentencelist.append(currentSentence.strip())\n prev_block_lastsentence = lastsentence\n \n advancing_to = 0\n if currentSentence.endswith(EOSX_str):\n advancing_to = k\n\n currentSentence = \"\"\n totalLength = 0\n break\n \n if currentSentence != \"\":\n if len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip())\n\n unique_blocks = len(sentencelist)\n print(f\"Text Blocks: {unique_blocks}\")\n num_EOS = 0\n for i in range(len(sentencelist)):\n if eos_to_hc:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)\n else:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, '')\n \n #someone may have had stop strings in the raw text...\n sentencelist[i] = sentencelist[i].replace(\"</s></s>\", EOS_str)\n num_EOS += sentencelist[i].count(EOS_str)\n\n if num_EOS > 0:\n print(f\"+ EOS count: {num_EOS}\")\n\n #final check for useless lines\n sentencelist = [item for item in sentencelist if item.strip() != \"</s>\"]\n sentencelist = [item for item in sentencelist if item.strip() != \"\"]\n\n\n if debug_slicer:\n # Write the log file\n Path('logs').mkdir(exist_ok=True)\n sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}\n output_file = \"logs/sentencelist.json\"\n with open(output_file, 'w') as f:\n json.dump(sentencelist_dict, f,indent=2)\n \n print(\"Saved sentencelist.json in logs folder\")\n \n return sentencelist "
},
{
"identifier": "download_file_from_url",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def download_file_from_url(url, overwrite, output_dir_in, valid_extensions = {'.txt', '.json'}):\n try:\n # Validate and sanitize the URL\n #parsed_url = urllib.parse.urlparse(url)\n #if not parsed_url.netloc:\n # raise ValueError(\"Invalid URL\")\n #filename = os.path.basename(parsed_url.path)\n\n # Get the filename from the URL\n\n session = requests.Session()\n headers = {}\n mode = 'wb'\n filename = url.split('/')[-1]\n\n output_dir = str(output_dir_in)\n # Construct the full path to the output file\n local_filename = os.path.join(output_dir, filename)\n\n # Check if the local file already exists\n overw = ''\n if os.path.exists(local_filename):\n if not overwrite:\n yield f\"File '{local_filename}' already exists. Aborting.\"\n return\n else:\n overw = ' [Overwrite existing]'\n\n filename_lower = filename.lower()\n\n # Send an HTTP GET request to the URL with a timeout\n file_extension = os.path.splitext(filename_lower)[-1]\n \n if file_extension not in valid_extensions:\n yield f\"Invalid file extension: {file_extension}. Only {valid_extensions} files are supported.\"\n return\n\n with session.get(url, stream=True, headers=headers, timeout=10) as r:\n r.raise_for_status() \n # total size can be wildly inaccurate\n #total_size = int(r.headers.get('content-length', 0))\n \n block_size = 1024 * 4 \n with open(local_filename, mode) as f:\n count = 0\n for data in r.iter_content(block_size):\n f.write(data)\n count += len(data)\n\n yield f\"Downloaded: {count} \" + overw\n\n # Verify file size if possible\n if os.path.exists(local_filename):\n downloaded_size = os.path.getsize(local_filename)\n if downloaded_size > 0:\n yield f\"File '{filename}' downloaded to '{output_dir}' ({downloaded_size} bytes).\"\n print(\"File Downloaded\")\n else:\n print(\"Downloaded file is zero\")\n yield f\"Failed. Downloaded file size is zero).\"\n else:\n print(f\"Error: {local_filename} failed to download.\")\n yield f\"Error: {local_filename} failed to download\"\n\n except Exception as e:\n print(f\"An error occurred: {e}\")\n yield f\"An error occurred: {e}\"\n\n finally:\n # Close the session to release resources\n session.close()"
}
] | import os
import json
import math
import random
import shutil
import sys
import threading
import time
import traceback
import gradio as gr
import pandas as pd
import torch
import transformers
import inspect
from datetime import datetime
from pathlib import Path
from functools import partial
from .custom_scheduler import FPSchedulerTrainer, FPNEFtuneTrainer
from .matplotgraph import create_graph
from .train_utils import get_available_loras_local, precise_cut, sliding_block_cut, download_file_from_url
from datasets import Dataset, load_dataset
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_kbit_training,
set_peft_model_state_dict
)
from peft.utils.other import \
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING as model_to_lora_modules
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
)
from modules import shared, utils
from modules.ui import create_refresh_button
from modules.evaluate import (
calculate_perplexity,
generate_markdown_table,
save_past_evaluations
)
from modules.logging_colors import logger
from modules.models import reload_model
from modules.utils import natural_keys
from typing import Callable, Optional, Tuple, ContextManager
from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
replace_peft_model_with_int4_lora_model
)
from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear
from alpaca_lora_4bit.models import Linear4bitLt | 9,883 | text_chunks = precise_cut(raw_text, precize_slicing_overlap, min_chars, False, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
total_blocks = len(text_chunks)
result = f"Text: ({raw_text_file}.txt) has {total_blocks} blocks (Block Size {cutoff_len} tokens)"
del text_chunks
else:
if dataset in ['None', '']:
yield "Select dataset or text file."
return
if format in ['None', '']:
yield "Select format choice for dataset."
return
with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
format_data: dict[str, str] = json.load(formatFile)
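# Fill the format template whose key set matches the data point's non-empty string fields.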
def generate_prompt(data_point: dict[str, str]):
for options, data in format_data.items():
if set(options.split(',')) == set(x[0] for x in data_point.items() if (type(x[1]) is str and len(x[1].strip()) > 0)):
for key, val in data_point.items():
if type(val) is str:
data = data.replace(f'%{key}%', val)
return data
raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"')
def tokenize_dummy(prompt):
input_ids = shared.tokenizer.encode(prompt, truncation=True, max_length=cutoff_len)
labels = [1] * len(input_ids)
input_ids = torch.tensor(input_ids)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": input_ids.ne(shared.tokenizer.pad_token_id),
}
def generate_and_tokenize_prompt(data_point):
prompt = generate_prompt(data_point)
return tokenize_dummy(prompt)
logger.info("Loading JSON datasets...")
data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
data_keys = []
if data:
if 'train' in data: # Check if the 'train' split exists in the dataset
data_keys = list(data['train'][0].keys())
print("Data Keys:", data_keys)
else:
print("The dataset is empty.")
train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
total_blocks = train_data.num_rows
result = f"Dataset: ({dataset}.json) has {total_blocks} blocks @ length = {cutoff_len} tokens\n(Keys: {data_keys} - Format: {format}.json): "
#for options, data in format_data.items():
# format_keys = options.split(',')
# result += f"{format_keys}, "
#result = result.rstrip()
#result = result.rstrip(',')
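# Derive total and per-epoch step counts, then suggest warmup and checkpoint-saving values from the block count.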
if total_blocks>0:
number_ofSteps = int(math.ceil(total_blocks / micro_batch_size) * epochs)
num_stepsPer_epoch = int(math.ceil(number_ofSteps/epochs))
min_warm = math.ceil(100 / grad_accumulation)
warmup_steps_suggest = min(int(min_warm*grad_accumulation), int(math.ceil(number_ofSteps * 0.1)))
warmup_steps_suggest = min(warmup_steps_suggest,num_stepsPer_epoch)
save_each_n_min = int(math.ceil(number_ofSteps/10))
save_each_n_max = int(math.ceil(number_ofSteps/5))
gradient_accumulation_max = int(total_blocks)//micro_batch_size
result += f"\n[Batch Size: {micro_batch_size}, Epochs: {epochs}, Gradient Accumulation: {grad_accumulation}]\n"
result += f"Total number of steps: {number_ofSteps}\n"
result += f"Steps per each Epoch: {num_stepsPer_epoch}\n"
result += f"Suggestions:\n"
result += f"Checkpoints: Save every {save_each_n_min} - {save_each_n_max} steps (Current: {int(save_steps)})\n"
result += f"Warmup steps: {warmup_steps_suggest} (Current: {int(warmup_steps)})"
if gradient_accumulation_max < grad_accumulation:
result += f"\n\nWARNING: Gradient Accumulation {grad_accumulation} is too high: It should be below {gradient_accumulation_max}"
yield result
return
check_dataset_btn.click(check_dataset, dataset_calc_params ,check_dataset_txt)
# Evaluation events. For some reason, the interrupt event
# doesn't work with the .then() syntax, so I write them one
# by one in this ugly but functional way.
ev = start_evaluation.click(calculate_perplexity, [models, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
start_current_evaluation.click(lambda: ['current model'], None, tmp)
ev_cur = start_current_evaluation.click(calculate_perplexity, [tmp, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_current_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
stop_evaluation.click(None, None, None, cancels=[ev, ev_cur], queue=False)
refresh_table.click(generate_markdown_table, None, evaluation_table, show_progress=True)
save_comments.click(
save_past_evaluations, evaluation_table, None).then(
lambda: "Comments saved.", None, evaluation_log, show_progress=False)
def reload_lora():
return gr.Dropdown.update(choices=get_available_loras_local(non_serialized_params['Lora_sortedByTime']))
# nonserialized items
sort_byTime.change(lambda x: non_serialized_params.update({"Lora_sortedByTime": x}), sort_byTime, None).then(reload_lora,None,copy_from)
#debug_slicer.change(lambda x: non_serialized_params.update({"debug_slicer": x}), debug_slicer, None)
def update_dataset():
return gr.update(choices=get_datasets('training/datasets', 'json')), gr.update(choices=get_datasets('training/datasets', 'txt'))
|
os.environ["WANDB_MODE"] = "offline"
# os.environ["WANDB_DISABLED"] = "true"
## just temporary to avoid warning
if hasattr(torch.utils.checkpoint, 'noop_context_fn'):
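# Re-implementation of torch.utils.checkpoint.checkpoint that defaults use_reentrant to True, avoiding the warning newer PyTorch versions emit when the flag is omitted (see the "temporary" note above).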
def my_checkpoint(
function,
*args,
use_reentrant: Optional[bool] = None,
context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = torch.utils.checkpoint.noop_context_fn,
determinism_check: str = torch.utils.checkpoint._DEFAULT_DETERMINISM_MODE,
debug: bool = False,
**kwargs
):
if use_reentrant is None:
#print ("reentran = NONE")
use_reentrant = True
# Hack to mix *args with **kwargs in a python 2.7-compliant way
preserve = kwargs.pop("preserve_rng_state", True)
if kwargs and use_reentrant:
raise ValueError(
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
)
if use_reentrant:
if context_fn is not torch.utils.checkpoint.noop_context_fn or debug is not False:
raise ValueError(
"Passing `context_fn` or `debug` is only supported when "
"use_reentrant=False."
)
return torch.utils.checkpoint.CheckpointFunction.apply(function, preserve, *args)
else:
print ("reentran = FALSE")
gen = torch.utils.checkpoint._checkpoint_without_reentrant_generator(
function, preserve, context_fn, determinism_check, debug, *args, **kwargs
)
# Runs pre-forward logic
next(gen)
ret = function(*args, **kwargs)
# Runs post-forward logic
try:
next(gen)
except StopIteration:
return ret
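    # Note (an assumption for illustration, not shown in this crop): a wrapper like
    # my_checkpoint only takes effect once it is monkey-patched over the library
    # function, e.g. roughly:
    #   torch.utils.checkpoint.checkpoint = my_checkpoint
    # so that later gradient-checkpointing calls fall back to use_reentrant=True
    # without emitting the warning mentioned above.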
params = {
"display_name": "Training PRO",
"is_tab": True
}
non_serialized_params = {
"debug_slicer": False,
"Lora_sortedByTime": False,
"stop_at_loss": 0,
"save_steps_under_loss": 0.0,
"save_checkpoint_now": False,
"training_loop": False,
"current_stability": 0,
"save_epochs": 0,
"checkpoint_offset": 0,
"epoch_offset":0,
}
MODEL_CLASSES = {v[1]: v[0] for v in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.items()}
PARAMETERS = ["lora_name", "always_override", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "higher_rank_limit", "warmup_steps", "optimizer", "hard_cut_string", "train_only_after", "stop_at_loss", "add_eos_token", "min_chars", "report_to", "precize_slicing_overlap", "add_eos_token_type", "save_steps_under_loss", "add_bos_token", "training_projection","sliding_window","warmup_ratio","grad_accumulation","neft_noise_alpha"]
WANT_INTERRUPT = False
train_log = {}
train_template = {}
train_log_graph = []
train_choices = ["all","q-k-v-o","q-k-v","k-v-down","q-v"]
statistics = {
'loss': [],
'lr': [],
}
RED = "\033[91m"
YELLOW = "\033[93m"
GREEN = "\033[92m"
RESET = "\033[0m"
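# Illustrative usage of the ANSI codes above (example only, not from the original source):
#   print(f"{YELLOW}WARNING: loss is not decreasing{RESET}")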
def ui():
with gr.Tab('Train LoRA', elem_id='lora-train-tab'):
tmp = gr.State('')
with gr.Row():
with gr.Column():
# YY.MM.DD
                gr.Markdown("`Ver: 23.10.20` This is an enhanced version of QLoRA Training. [Maintained by FP](https://github.com/FartyPants/Training_PRO/tree/main)")
with gr.Row():
with gr.Column(scale=5):
with gr.Row():
copy_from = gr.Dropdown(label='Copy parameters from', value='None', choices=get_available_loras_local(non_serialized_params['Lora_sortedByTime']), elem_classes=['slim-dropdown'])
create_refresh_button(copy_from, lambda: None, lambda: {'choices': get_available_loras_local(non_serialized_params['Lora_sortedByTime'])}, 'refresh-button')
with gr.Column():
sort_byTime = gr.Checkbox(label='Sort list by Date', value=False, info='Sorts Loras by date created.', elem_classes=['no-background'])
with gr.Row():
with gr.Column(scale=5):
lora_name = gr.Textbox(label='Name', info='The name of your new LoRA file')
with gr.Column():
always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background'])
with gr.Row():
with gr.Column():
lora_rank = gr.Slider(label='LoRA Rank', value=32, minimum=0, maximum=1024, step=4, info='Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.')
lora_alpha = gr.Slider(label='LoRA Alpha', value=64, minimum=0, maximum=2048, step=4, info='This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.')
                batch_size = gr.Slider(visible=False, label='Batch Size', value=0, minimum=0, maximum=1024, step=4, info='Now replaced with Gradient Accumulation. Kept for the sake of old saved data.')
                micro_batch_size = gr.Slider(label='True Batch Size', value=4, minimum=1, maximum=128, step=1, info='Specifies how many text blocks per step will be trained. The higher the value, the better the training captures the concept, but it requires more GPU memory and reduces speed.')
grad_accumulation = gr.Slider(label='Gradient Accumulation Steps', value=1, minimum=1, maximum=256, step=1, info="Virtually multiplies the Batch Size by averaging the learning over more than one step. VRAM friendly. Evens out loss fluctuations but can also degrade training fidelity.")
with gr.Column():
stop_at_loss = gr.Slider(label='Stop at loss (Can be changed during training)', minimum=0.0, maximum=3.0, step=0.1, value=0.00, info='The process will automatically stop once the desired loss value is reached.')
gr.Markdown(" ")
epochs = gr.Number(label='Epochs', value=3, info='Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.')
learning_rate = gr.Textbox(label='Learning Rate', value='3e-4', info='In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.')
lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='linear', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt', 'FP_low_epoch_annealing', 'FP_half_time_annealing','FP_raise_fall_creative'], info='Learning rate scheduler - defines how the learning rate changes over time. Custom schedulers: FP_low_epoch_annealing, FP_half_time_annealing, FP_raise_fall_creative (see README)', elem_classes=['slim-dropdown'])
with gr.Accordion(label='Checkpoints', open=True):
with gr.Row():
with gr.Column():
save_steps = gr.Number(label='Save every n steps', value=0, info='A checkpoint will be saved every n steps and at each Epoch boundary. (0 = OFF)')
with gr.Column():
                    save_steps_under_loss = gr.Slider(label='Save at 10% Loss change', value=1.8, minimum=0.0, maximum=3.0, step=0.1, info="Saves checkpoints at (or below) this loss and then each time the loss falls by at least 10%. This works independently from 'Save every n steps'.")
with gr.Row():
save_chackpoint_now = gr.Button('Queue Checkpoint Now')
with gr.Accordion(label='Advanced Options', open=True):
with gr.Row():
with gr.Column():
                    warmup_steps = gr.Number(label='Warmup Steps', value=100, info='Maximum number of steps used for a linear warmup. Reduces early over-fitting to the first training blocks. This value takes precedence over Warmup Ratio and is aligned to the closest multiple of gradient accumulation.')
warmup_ratio = gr.Slider(label='Warmup Ratio', minimum=0.0, maximum=0.2, step=0.025, value=0.0, info='Ratio of total training steps that will be used for a linear warmup. It applies only if Warmup Step is 0.')
neft_noise_alpha = gr.Slider(label='NEFtune noise scale', minimum=0.0, maximum=15, step=1, value=0.0, info='Add noise to the training to improve generalization. [0 - OFF, Starting value to experiment: 5]')
training_projection = gr.Radio(value = train_choices[4], label='LLaMA Target Projections', info='Change the targets (LORA is typically q-v)', choices=train_choices)
lora_dropout = gr.Slider(label='LoRA Dropout', minimum=0.0, maximum=1.0, step=0.025, value=0.05, info='Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.')
optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.', elem_classes=['slim-dropdown'])
with gr.Column():
train_only_after = gr.Textbox(label='Train Only After', value='', info='Only consider text *after* this string in any given chunk for training. For Alpaca datasets, use "### Response:" to only train the response and ignore the input.')
add_bos_token = gr.Checkbox(label='Add BOS token', value=True, info="Adds BOS token for each dataset item")
add_eos_token = gr.Checkbox(label='Add EOS token', value=False, info="Adds EOS token for each dataset item")
add_eos_token_type = gr.Dropdown(label='EOS placement (Text file)', choices=['Every Block', 'Hard Cut Blocks Only'], value='Every Block', info='', allow_custom_value = False)
higher_rank_limit = gr.Checkbox(label='Enable higher ranks', value=False, info='If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.')
report_to = gr.Radio(label="Save detailed logs with", value="None", choices=["None", "wandb", "tensorboard"], interactive=True)
# for future
#with gr.Accordion(label='Dynamic Scheduler', open = False):
# ds_min_epochs = gr.Number(label='Minimum Epochs', value='1', info='Minimum epochs that will be always performed before ramp down can be triggered')
# ds_max_epochs = gr.Number(label='Maximum Epochs (fallback)', value='50', info='Maximum Epochs before the training will bail out completely (should be a large number)')
# ds_loss_trigger = gr.Slider(label='Trigger Loss', minimum=0.0, maximum=2.8, step=0.1, value=1.6, info='Loss at which the ramp down schedule will be triggered')
# ds_loss_rolling_window = gr.Number(label='Loss rolling average', value='4', info='Calculate loss by averaging last x numbers to avoid jumps and noise')
                # ds_epochs_to_ramp = gr.Slider(label='Ramp down ratio', minimum=0.0, maximum=2.0, step=0.1, value=1.00, info='How long the ramp down will last relative to elapsed steps (before trigger)')
                # gr.Markdown('These are settings for the FP_dynamic_loss_trigger scheduler. The scheduler will warm up, then hold constant until the loss falls under Trigger Loss, then commence a linear ramp down schedule and stop. The length of the ramp down is set by Ramp down ratio, where (ramp down steps) = ratio * (elapsed steps). (The time to completion shown will be very high until the ramp down is triggered.)')
with gr.Column():
with gr.Tab(label='Formatted Dataset'):
with gr.Row():
with gr.Column():
with gr.Row():
dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'])
create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
with gr.Row():
eval_dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'])
create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
with gr.Column():
with gr.Row():
format = gr.Dropdown(choices=get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'])
create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('training/formats', 'json')}, 'refresh-button')
with gr.Row():
eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')
with gr.Tab(label="Text file"):
with gr.Row():
raw_text_file = gr.Dropdown(choices=get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The text file to use for training.', elem_classes=['slim-dropdown'])
create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'txt')}, 'refresh-button')
with gr.Row():
with gr.Column():
precize_slicing_overlap = gr.Checkbox(label='Add Overlapping blocks', value = True)
                        sliding_window = gr.Checkbox(label='DEMENTOR Long-form Learning by FP (Highly Experimental, use low epochs)', value=False, info='Deep Memorization Enforcement Through Overlapping and Repetition. (I named it, so shush). Special process for learning long-form text using a low number of epochs.')
#debug_slicer = gr.Checkbox(label='Dump sentencelist.json to logs', value = non_serialized_params['debug_slicer'], info='Debug Slicer')
with gr.Column():
hard_cut_string = gr.Textbox(label='Hard Cut String', value='\\n\\n\\n', info='String that indicates a cut between logical blocks of text (ex. Ideas or Chapters). Helps prevent unwanted overlap between unrelated ideas.')
min_chars = gr.Number(label='Ignore small blocks', value=0, info='Ignore Text blocks that have less or equal characters than this number.')
with gr.Tab(label="URL"):
with gr.Row():
with gr.Column():
                        download_file_url = gr.Textbox(label='Download JSON or txt file to datasets (or formats) folder', value='', info='The URL of a file to download. If on GitHub, make sure you get the URL of the raw file (https://raw.githubusercontent.com/...). If on Hugging Face, make sure the URL has /resolve/ in it, not /blob/.')
with gr.Row():
                            download_check_overwrite = gr.Checkbox(label='Overwrite', value=False, info='Overwrite if the file exists')
download_folder = gr.Radio(label="Destination", value='training/datasets', choices=['training/datasets', 'training/formats'], interactive=True)
download_button = gr.Button('Download')
download_status = gr.Textbox(label='Download Status', value='', interactive=False)
with gr.Row():
with gr.Column():
with gr.Row():
cutoff_len = gr.Slider(label='Chunk Length (Cutoff Length)', minimum=32, maximum=2048, value=256, step=32, info='The maximum length of a chunk (in tokens). Applies to both JSON dataset and text files. Higher values require much more VRAM.')
with gr.Row():
with gr.Column():
check_dataset_btn = gr.Button('Verify Dataset/Text File and suggest data entries')
check_dataset_txt = gr.Textbox(label='Dataset info', value='')
with gr.Row():
start_button = gr.Button("Start LoRA Training", variant='primary')
stop_button = gr.Button("Interrupt")
with gr.Accordion(label="Graph", open=True):
with gr.Row():
# show_actions_button = False - we use old gradio
plot_graph = gr.LinePlot(x="epoch", y="value", title="Loss Metrics", overlay_point=True, tooltip=["epoch", "value"], x_lim=[0, 1], y_lim=[0, 3.5], width=500, height=250)
output = gr.Markdown(value="Ready")
with gr.Tab('Perplexity evaluation', elem_id='evaluate-tab'):
with gr.Row():
with gr.Column():
models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True)
evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.')
with gr.Row():
with gr.Column():
stride_length = gr.Slider(label='Stride', minimum=1, maximum=2048, value=512, step=1, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
with gr.Column():
max_length = gr.Slider(label='max_length', minimum=0, maximum=8096, value=0, step=1, info='The context for each evaluation. If set to 0, the maximum context length for the model will be used.')
with gr.Row():
start_current_evaluation = gr.Button("Evaluate loaded model")
start_evaluation = gr.Button("Evaluate selected models")
stop_evaluation = gr.Button("Interrupt")
with gr.Column():
evaluation_log = gr.Markdown(value='')
evaluation_table = gr.Dataframe(value=generate_markdown_table(), interactive=True)
with gr.Row():
save_comments = gr.Button('Save comments', elem_classes="small-button")
refresh_table = gr.Button('Refresh the table', elem_classes="small-button")
# Training events
all_params = [lora_name, always_override, save_steps, micro_batch_size, batch_size, epochs, learning_rate, lr_scheduler_type, lora_rank, lora_alpha, lora_dropout, cutoff_len, dataset, eval_dataset, format, eval_steps, raw_text_file, higher_rank_limit, warmup_steps, optimizer, hard_cut_string, train_only_after, stop_at_loss, add_eos_token, min_chars, report_to, precize_slicing_overlap, add_eos_token_type, save_steps_under_loss, add_bos_token, training_projection,sliding_window,warmup_ratio,grad_accumulation, neft_noise_alpha]
def fix_old_version(batch_size_val,micro_batch_size_val, grad_accumulation_val):
if batch_size_val>0:
gradient_acc = batch_size_val // micro_batch_size_val
print(f"Using Old version of Batch Size ({batch_size_val}) to set Gradient Accumulation: {gradient_acc}")
return gradient_acc
return grad_accumulation_val
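    # Worked example (illustrative values): a legacy config saved with Batch Size 128
    # and True Batch Size (micro_batch_size) 4 is converted above to 128 // 4 = 32
    # gradient accumulation steps; with Batch Size 0 the stored grad_accumulation
    # value is returned unchanged.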
copy_from.change(partial(do_copy_params, all_params= all_params), copy_from, all_params).then(fix_old_version,[batch_size,micro_batch_size, grad_accumulation],grad_accumulation)
start_button.click(do_train, all_params, [output,plot_graph])
stop_button.click(do_interrupt, None, None, queue=False)
higher_rank_limit.change(change_rank_limit, [higher_rank_limit], [lora_rank, lora_alpha])
def trigger_stop_at_loss(stop_at_loss_value):
non_serialized_params.update({"stop_at_loss": stop_at_loss_value})
if non_serialized_params['training_loop']:
print(f"Queue: [Stop at loss Change] to {stop_at_loss_value}")
stop_at_loss.change(trigger_stop_at_loss, stop_at_loss, None)
def trigger_save_checkpoint():
non_serialized_params.update({"save_checkpoint_now": True})
if non_serialized_params['training_loop']:
print("Queue: [Save checkpoint] Checkpoint will be saved after the current step is finished.")
else:
            print("Use this during training to save a checkpoint at any time.")
def update_button():
return gr.Button.update('[Checkpoint in Queue]', variant='stop', interactive=True)
def update_button2():
time.sleep(1.0)
        return gr.Button.update('Queue Checkpoint Now', variant='secondary', interactive=True)
save_chackpoint_now.click(trigger_save_checkpoint, None, None).then(update_button, None,save_chackpoint_now).then(update_button2, None,save_chackpoint_now)
dataset_calc_params = [save_steps,micro_batch_size, epochs, cutoff_len, dataset, format, raw_text_file, warmup_steps, hard_cut_string, min_chars, precize_slicing_overlap,sliding_window,warmup_ratio,grad_accumulation]
def check_dataset(save_steps:int, micro_batch_size: int, epochs: int, cutoff_len: int, dataset:str, format:str, raw_text_file:str, warmup_steps:int, hard_cut_string:str, min_chars:int, precize_slicing_overlap:bool,sliding_window:bool,warmup_ratio:float,grad_accumulation:int):
        result = "Specify JSON dataset or Text file"
total_blocks = 0
if shared.tokenizer is None:
yield "Tokenizer is not available. Please Load some Model first."
return
if raw_text_file not in ['None', '']:
logger.info("Loading Text file...")
fullpath = clean_path('training/datasets', f'{raw_text_file}')
fullpath = Path(fullpath)
if fullpath.is_dir():
logger.info('Training path directory {}'.format(raw_text_file))
raw_text = ""
file_paths = sorted(fullpath.glob('*.txt'), key=lambda path: natural_keys(path.name))
for file_path in file_paths:
if file_path.is_file():
with file_path.open('r', encoding='utf-8') as file:
raw_text += file.read().replace('\r', '')
logger.info(f"Loaded training file: {file_path.name}")
else:
try:
with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
raw_text = file.read().replace('\r', '')
except:
                    yield f"{raw_text_file}.txt doesn't seem to exist anymore... check your training/datasets folder"
return
if min_chars<0:
min_chars = 0
# == New more precise slicing on sentence boundary ==
if sliding_window:
text_chunks = sliding_block_cut(raw_text, min_chars, False, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
else:
text_chunks = precise_cut(raw_text, precize_slicing_overlap, min_chars, False, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
total_blocks = len(text_chunks)
result = f"Text: ({raw_text_file}.txt) has {total_blocks} blocks (Block Size {cutoff_len} tokens)"
del text_chunks
else:
if dataset in ['None', '']:
yield "Select dataset or text file."
return
if format in ['None', '']:
yield "Select format choice for dataset."
return
with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
format_data: dict[str, str] = json.load(formatFile)
def generate_prompt(data_point: dict[str, str]):
for options, data in format_data.items():
if set(options.split(',')) == set(x[0] for x in data_point.items() if (type(x[1]) is str and len(x[1].strip()) > 0)):
for key, val in data_point.items():
if type(val) is str:
data = data.replace(f'%{key}%', val)
return data
raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"')
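            # Illustrative example of the matching above (made-up values): with
            # format_data = {"instruction,output": "Q: %instruction%\nA: %output%"}
            # and data_point = {"instruction": "2+2?", "output": "4"}, the non-empty
            # key set {"instruction", "output"} matches the option key set, so the
            # template is filled in to "Q: 2+2?\nA: 4".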
def tokenize_dummy(prompt):
input_ids = shared.tokenizer.encode(prompt, truncation=True, max_length=cutoff_len)
labels = [1] * len(input_ids)
input_ids = torch.tensor(input_ids)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": input_ids.ne(shared.tokenizer.pad_token_id),
}
def generate_and_tokenize_prompt(data_point):
prompt = generate_prompt(data_point)
return tokenize_dummy(prompt)
logger.info("Loading JSON datasets...")
data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
data_keys = []
if data:
if 'train' in data: # Check if the 'train' split exists in the dataset
data_keys = list(data['train'][0].keys())
print("Data Keys:", data_keys)
else:
print("The dataset is empty.")
train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
total_blocks = train_data.num_rows
result = f"Dataset: ({dataset}.json) has {total_blocks} blocks @ length = {cutoff_len} tokens\n(Keys: {data_keys} - Format: {format}.json): "
#for options, data in format_data.items():
# format_keys = options.split(',')
# result += f"{format_keys}, "
#result = result.rstrip()
#result = result.rstrip(',')
if total_blocks>0:
number_ofSteps = int(math.ceil(total_blocks / micro_batch_size) * epochs)
num_stepsPer_epoch = int(math.ceil(number_ofSteps/epochs))
min_warm = math.ceil(100 / grad_accumulation)
warmup_steps_suggest = min(int(min_warm*grad_accumulation), int(math.ceil(number_ofSteps * 0.1)))
warmup_steps_suggest = min(warmup_steps_suggest,num_stepsPer_epoch)
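            # Worked example (illustrative values): total_blocks=1000, micro_batch_size=4,
            # epochs=3, grad_accumulation=2 gives number_ofSteps = ceil(1000/4)*3 = 750 and
            # num_stepsPer_epoch = 250; min_warm = ceil(100/2) = 50, so the suggestion is
            # min(min(50*2, ceil(750*0.1)), 250) = min(100, 75, 250) = 75 warmup steps.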
save_each_n_min = int(math.ceil(number_ofSteps/10))
save_each_n_max = int(math.ceil(number_ofSteps/5))
gradient_accumulation_max = int(total_blocks)//micro_batch_size
result += f"\n[Batch Size: {micro_batch_size}, Epochs: {epochs}, Gradient Accumulation: {grad_accumulation}]\n"
result += f"Total number of steps: {number_ofSteps}\n"
result += f"Steps per each Epoch: {num_stepsPer_epoch}\n"
result += f"Suggestions:\n"
result += f"Checkpoints: Save every {save_each_n_min} - {save_each_n_max} steps (Current: {int(save_steps)})\n"
result += f"Warmup steps: {warmup_steps_suggest} (Current: {int(warmup_steps)})"
if gradient_accumulation_max < grad_accumulation:
result += f"\n\nWARNING: Gradient Accumulation {grad_accumulation} is too high: It should be below {gradient_accumulation_max}"
yield result
return
    check_dataset_btn.click(check_dataset, dataset_calc_params, check_dataset_txt)
# Evaluation events. For some reason, the interrupt event
# doesn't work with the .then() syntax, so I write them one
# by one in this ugly but functional way.
ev = start_evaluation.click(calculate_perplexity, [models, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
start_current_evaluation.click(lambda: ['current model'], None, tmp)
ev_cur = start_current_evaluation.click(calculate_perplexity, [tmp, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_current_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
stop_evaluation.click(None, None, None, cancels=[ev, ev_cur], queue=False)
refresh_table.click(generate_markdown_table, None, evaluation_table, show_progress=True)
save_comments.click(
save_past_evaluations, evaluation_table, None).then(
lambda: "Comments saved.", None, evaluation_log, show_progress=False)
def reload_lora():
return gr.Dropdown.update(choices=get_available_loras_local(non_serialized_params['Lora_sortedByTime']))
# nonserialized items
sort_byTime.change(lambda x: non_serialized_params.update({"Lora_sortedByTime": x}), sort_byTime, None).then(reload_lora,None,copy_from)
#debug_slicer.change(lambda x: non_serialized_params.update({"debug_slicer": x}), debug_slicer, None)
def update_dataset():
return gr.update(choices=get_datasets('training/datasets', 'json')), gr.update(choices=get_datasets('training/datasets', 'txt'))
| download_button.click(download_file_from_url, [download_file_url,download_check_overwrite,download_folder] , download_status).then(update_dataset,None,[dataset , raw_text_file]) | 6 | 2023-12-20 14:13:38+00:00 | 12k |
foocker/Bert-VITS2-Faster | text/chinese.py | [
{
"identifier": "punctuation",
"path": "text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \"鸳鸯\",\n \"高粱\",\n \"骨头\",\n \"骆驼\",\n \"马虎\",\n \"首饰\",\n \"馒头\",\n \"馄饨\",\n \"风筝\",\n \"难为\",\n \"队伍\",\n \"阔气\",\n \"闺女\",\n \"门道\",\n \"锄头\",\n \"铺盖\",\n \"铃铛\",\n \"铁匠\",\n \"钥匙\",\n \"里脊\",\n \"里头\",\n \"部分\",\n \"那么\",\n \"道士\",\n \"造化\",\n \"迷糊\",\n \"连累\",\n \"这么\",\n \"这个\",\n \"运气\",\n \"过去\",\n \"软和\",\n \"转悠\",\n \"踏实\",\n \"跳蚤\",\n \"跟头\",\n \"趔趄\",\n \"财主\",\n \"豆腐\",\n \"讲究\",\n \"记性\",\n \"记号\",\n \"认识\",\n \"规矩\",\n \"见识\",\n \"裁缝\",\n \"补丁\",\n \"衣裳\",\n \"衣服\",\n \"衙门\",\n \"街坊\",\n \"行李\",\n \"行当\",\n \"蛤蟆\",\n \"蘑菇\",\n \"薄荷\",\n \"葫芦\",\n \"葡萄\",\n \"萝卜\",\n \"荸荠\",\n \"苗条\",\n \"苗头\",\n \"苍蝇\",\n \"芝麻\",\n \"舒服\",\n \"舒坦\",\n \"舌头\",\n \"自在\",\n \"膏药\",\n \"脾气\",\n \"脑袋\",\n \"脊梁\",\n \"能耐\",\n \"胳膊\",\n \"胭脂\",\n \"胡萝\",\n \"胡琴\",\n \"胡同\",\n \"聪明\",\n \"耽误\",\n \"耽搁\",\n \"耷拉\",\n \"耳朵\",\n \"老爷\",\n \"老实\",\n \"老婆\",\n \"老头\",\n \"老太\",\n \"翻腾\",\n \"罗嗦\",\n \"罐头\",\n \"编辑\",\n \"结实\",\n \"红火\",\n \"累赘\",\n \"糨糊\",\n \"糊涂\",\n \"精神\",\n \"粮食\",\n \"簸箕\",\n \"篱笆\",\n \"算计\",\n \"算盘\",\n \"答应\",\n \"笤帚\",\n \"笑语\",\n \"笑话\",\n \"窟窿\",\n \"窝囊\",\n \"窗户\",\n \"稳当\",\n \"稀罕\",\n \"称呼\",\n \"秧歌\",\n \"秀气\",\n \"秀才\",\n \"福气\",\n \"祖宗\",\n \"砚台\",\n \"码头\",\n \"石榴\",\n \"石头\",\n \"石匠\",\n \"知识\",\n \"眼睛\",\n \"眯缝\",\n \"眨巴\",\n \"眉毛\",\n \"相声\",\n \"盘算\",\n \"白净\",\n \"痢疾\",\n \"痛快\",\n \"疟疾\",\n \"疙瘩\",\n \"疏忽\",\n \"畜生\",\n \"生意\",\n \"甘蔗\",\n \"琵琶\",\n \"琢磨\",\n \"琉璃\",\n \"玻璃\",\n \"玫瑰\",\n \"玄乎\",\n \"狐狸\",\n \"状元\",\n \"特务\",\n \"牲口\",\n \"牙碜\",\n \"牌楼\",\n \"爽快\",\n \"爱人\",\n \"热闹\",\n \"烧饼\",\n \"烟筒\",\n \"烂糊\",\n \"点心\",\n \"炊帚\",\n \"灯笼\",\n \"火候\",\n \"漂亮\",\n \"滑溜\",\n \"溜达\",\n \"温和\",\n \"清楚\",\n \"消息\",\n \"浪头\",\n \"活泼\",\n \"比方\",\n \"正经\",\n \"欺负\",\n \"模糊\",\n \"槟榔\",\n \"棺材\",\n \"棒槌\",\n \"棉花\",\n \"核桃\",\n \"栅栏\",\n \"柴火\",\n \"架势\",\n \"枕头\",\n \"枇杷\",\n \"机灵\",\n \"本事\",\n \"木头\",\n \"木匠\",\n \"朋友\",\n \"月饼\",\n \"月亮\",\n \"暖和\",\n \"明白\",\n \"时候\",\n \"新鲜\",\n \"故事\",\n \"收拾\",\n \"收成\",\n \"提防\",\n \"挖苦\",\n \"挑剔\",\n \"指甲\",\n \"指头\",\n \"拾掇\",\n \"拳头\",\n \"拨弄\",\n \"招牌\",\n \"招呼\",\n \"抬举\",\n \"护士\",\n \"折腾\",\n \"扫帚\",\n \"打量\",\n \"打算\",\n \"打点\",\n \"打扮\",\n \"打听\",\n \"打发\",\n \"扎实\",\n \"扁担\",\n \"戒指\",\n \"懒得\",\n \"意识\",\n \"意思\",\n \"情形\",\n \"悟性\",\n \"怪物\",\n \"思量\",\n \"怎么\",\n \"念头\",\n \"念叨\",\n \"快活\",\n \"忙活\",\n \"志气\",\n \"心思\",\n \"得罪\",\n \"张罗\",\n \"弟兄\",\n \"开通\",\n \"应酬\",\n \"庄稼\",\n \"干事\",\n \"帮手\",\n \"帐篷\",\n \"希罕\",\n \"师父\",\n \"师傅\",\n \"巴结\",\n \"巴掌\",\n \"差事\",\n \"工夫\",\n \"岁数\",\n \"屁股\",\n \"尾巴\",\n \"少爷\",\n \"小气\",\n \"小伙\",\n \"将就\",\n \"对头\",\n \"对付\",\n \"寡妇\",\n \"家伙\",\n \"客气\",\n \"实在\",\n \"官司\",\n \"学问\",\n \"学生\",\n \"字号\",\n \"嫁妆\",\n \"媳妇\",\n \"媒人\",\n \"婆家\",\n \"娘家\",\n \"委屈\",\n \"姑娘\",\n \"姐夫\",\n \"妯娌\",\n \"妥当\",\n \"妖精\",\n \"奴才\",\n \"女婿\",\n \"头发\",\n \"太阳\",\n \"大爷\",\n \"大方\",\n \"大意\",\n \"大夫\",\n \"多少\",\n \"多么\",\n \"外甥\",\n \"壮实\",\n \"地道\",\n \"地方\",\n \"在乎\",\n \"困难\",\n \"嘴巴\",\n \"嘱咐\",\n \"嘟囔\",\n \"嘀咕\",\n \"喜欢\",\n \"喇嘛\",\n \"喇叭\",\n \"商量\",\n \"唾沫\",\n \"哑巴\",\n \"哈欠\",\n \"哆嗦\",\n \"咳嗽\",\n \"和尚\",\n \"告诉\",\n \"告示\",\n \"含糊\",\n \"吓唬\",\n \"后头\",\n \"名字\",\n \"名堂\",\n \"合同\",\n \"吆喝\",\n \"叫唤\",\n \"口袋\",\n \"厚道\",\n \"厉害\",\n \"千斤\",\n \"包袱\",\n \"包涵\",\n \"匀称\",\n \"勤快\",\n \"动静\",\n \"动弹\",\n \"功夫\",\n \"力气\",\n \"前头\",\n \"刺猬\",\n \"刺激\",\n \"别扭\",\n \"利落\",\n \"利索\",\n \"利害\",\n \"分析\",\n \"出息\",\n \"凑合\",\n \"凉快\",\n \"冷战\",\n \"冤枉\",\n \"冒失\",\n 
\"养活\",\n \"关系\",\n \"先生\",\n \"兄弟\",\n \"便宜\",\n \"使唤\",\n \"佩服\",\n \"作坊\",\n \"体面\",\n \"位置\",\n \"似的\",\n \"伙计\",\n \"休息\",\n \"什么\",\n \"人家\",\n \"亲戚\",\n \"亲家\",\n \"交情\",\n \"云彩\",\n \"事情\",\n \"买卖\",\n \"主意\",\n \"丫头\",\n \"丧气\",\n \"两口\",\n \"东西\",\n \"东家\",\n \"世故\",\n \"不由\",\n \"不在\",\n \"下水\",\n \"下巴\",\n \"上头\",\n \"上司\",\n \"丈夫\",\n \"丈人\",\n \"一辈\",\n \"那个\",\n \"菩萨\",\n \"父亲\",\n \"母亲\",\n \"咕噜\",\n \"邋遢\",\n \"费用\",\n \"冤家\",\n \"甜头\",\n \"介绍\",\n \"荒唐\",\n \"大人\",\n \"泥鳅\",\n \"幸福\",\n \"熟悉\",\n \"计划\",\n \"扑腾\",\n \"蜡烛\",\n \"姥爷\",\n \"照顾\",\n \"喉咙\",\n \"吉他\",\n \"弄堂\",\n \"蚂蚱\",\n \"凤凰\",\n \"拖沓\",\n \"寒碜\",\n \"糟蹋\",\n \"倒腾\",\n \"报复\",\n \"逻辑\",\n \"盘缠\",\n \"喽啰\",\n \"牢骚\",\n \"咖喱\",\n \"扫把\",\n \"惦记\",\n }\n self.must_not_neural_tone_words = {\n \"男子\",\n \"女子\",\n \"分子\",\n \"原子\",\n \"量子\",\n \"莲子\",\n \"石子\",\n \"瓜子\",\n \"电子\",\n \"人人\",\n \"虎虎\",\n }\n self.punc = \":,;。?!“”‘’':,;.?!\"\n\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\n # e.g.\n # word: \"家里\"\n # pos: \"s\"\n # finals: ['ia1', 'i3']\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\n for j, item in enumerate(word):\n if (\n j - 1 >= 0\n and item == word[j - 1]\n and pos[0] in {\"n\", \"v\", \"a\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[j] = finals[j][:-1] + \"5\"\n ge_idx = word.find(\"个\")\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\n finals[-1] = finals[-1][:-1] + \"5\"\n elif len(word) >= 1 and word[-1] in \"的地得\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 走了, 看着, 去过\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\n # finals[-1] = finals[-1][:-1] + \"5\"\n elif (\n len(word) > 1\n and word[-1] in \"们子\"\n and pos in {\"r\", \"n\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 桌上, 地下, 家里\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 上来, 下去\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # 个做量词\n elif (\n ge_idx >= 1\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\n ) or word == \"个\":\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\n else:\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n\n word_list = self._split_word(word)\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n for i, word in enumerate(word_list):\n # conventional neural in Chinese\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\n finals = sum(finals_list, [])\n return finals\n\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # e.g. 看不懂\n if len(word) == 3 and word[1] == \"不\":\n finals[1] = finals[1][:-1] + \"5\"\n else:\n for i, char in enumerate(word):\n # \"不\" before tone4 should be bu2, e.g. 不怕\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n return finals\n\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # \"一\" in number sequences, e.g. 
一零零, 二一零\n if word.find(\"一\") != -1 and all(\n [item.isnumeric() for item in word if item != \"一\"]\n ):\n return finals\n # \"一\" between reduplication words should be yi5, e.g. 看一看\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\n finals[1] = finals[1][:-1] + \"5\"\n # when \"一\" is ordinal word, it should be yi1\n elif word.startswith(\"第一\"):\n finals[1] = finals[1][:-1] + \"1\"\n else:\n for i, char in enumerate(word):\n if char == \"一\" and i + 1 < len(word):\n # \"一\" before tone4 should be yi2, e.g. 一段\n if finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n # \"一\" before non-tone4 should be yi4, e.g. 一天\n else:\n # \"一\" 后面如果是标点,还读一声\n if word[i + 1] not in self.punc:\n finals[i] = finals[i][:-1] + \"4\"\n return finals\n\n def _split_word(self, word: str) -> List[str]:\n word_list = jieba.cut_for_search(word)\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\n first_subword = word_list[0]\n first_begin_idx = word.find(first_subword)\n if first_begin_idx == 0:\n second_subword = word[len(first_subword) :]\n new_word_list = [first_subword, second_subword]\n else:\n second_subword = word[: -len(first_subword)]\n new_word_list = [second_subword, first_subword]\n return new_word_list\n\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\n if len(word) == 2 and self._all_tone_three(finals):\n finals[0] = finals[0][:-1] + \"2\"\n elif len(word) == 3:\n word_list = self._split_word(word)\n if self._all_tone_three(finals):\n # disyllabic + monosyllabic, e.g. 蒙古/包\n if len(word_list[0]) == 2:\n finals[0] = finals[0][:-1] + \"2\"\n finals[1] = finals[1][:-1] + \"2\"\n # monosyllabic + disyllabic, e.g. 纸/老虎\n elif len(word_list[0]) == 1:\n finals[1] = finals[1][:-1] + \"2\"\n else:\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n if len(finals_list) == 2:\n for i, sub in enumerate(finals_list):\n # e.g. 所有/人\n if self._all_tone_three(sub) and len(sub) == 2:\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\n # e.g. 好/喜欢\n elif (\n i == 1\n and not self._all_tone_three(sub)\n and finals_list[i][0][-1] == \"3\"\n and finals_list[0][-1][-1] == \"3\"\n ):\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\n finals = sum(finals_list, [])\n # split idiom into two words who's length is 2\n elif len(word) == 4:\n finals_list = [finals[:2], finals[2:]]\n finals = []\n for sub in finals_list:\n if self._all_tone_three(sub):\n sub[0] = sub[0][:-1] + \"2\"\n finals += sub\n\n return finals\n\n def _all_tone_three(self, finals: List[str]) -> bool:\n return all(x[-1] == \"3\" for x in finals)\n\n # merge \"不\" and the word behind it\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\n def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n last_word = \"\"\n for word, pos in seg:\n if last_word == \"不\":\n word = last_word + word\n if word != \"不\":\n new_seg.append((word, pos))\n last_word = word[:]\n if last_word == \"不\":\n new_seg.append((last_word, \"d\"))\n last_word = \"\"\n return new_seg\n\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. 
\"听\",\"一\",\"听\" ->\"听一听\"\n # function 2: merge single \"一\" and the word behind it\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\n # e.g.\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\n # output seg: [['听一听', 'v']]\n def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n # function 1\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and word == \"一\"\n and i + 1 < len(seg)\n and seg[i - 1][0] == seg[i + 1][0]\n and seg[i - 1][1] == \"v\"\n ):\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\n else:\n if (\n i - 2 >= 0\n and seg[i - 1][0] == \"一\"\n and seg[i - 2][0] == word\n and pos == \"v\"\n ):\n continue\n else:\n new_seg.append([word, pos])\n seg = new_seg\n new_seg = []\n # function 2\n for i, (word, pos) in enumerate(seg):\n if new_seg and new_seg[-1][0] == \"一\":\n new_seg[-1][0] = new_seg[-1][0] + word\n else:\n new_seg.append([word, pos])\n return new_seg\n\n # the first and the second words are all_tone_three\n def _merge_continuous_three_tones(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and self._all_tone_three(sub_finals_list[i - 1])\n and self._all_tone_three(sub_finals_list[i])\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n\n return new_seg\n\n def _is_reduplication(self, word: str) -> bool:\n return len(word) == 2 and word[0] == word[1]\n\n # the last char of first word and the first char of second word is tone_three\n def _merge_continuous_three_tones_2(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and sub_finals_list[i - 1][-1][-1] == \"3\"\n and sub_finals_list[i][0][-1] == \"3\"\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if new_seg and word == new_seg[-1][0]:\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n 
return new_seg\n\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n seg = self._merge_bu(seg)\n try:\n seg = self._merge_yi(seg)\n except:\n print(\"_merge_yi failed\")\n seg = self._merge_reduplication(seg)\n seg = self._merge_continuous_three_tones(seg)\n seg = self._merge_continuous_three_tones_2(seg)\n seg = self._merge_er(seg)\n return seg\n\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\n finals = self._bu_sandhi(word, finals)\n finals = self._yi_sandhi(word, finals)\n finals = self._neural_sandhi(word, pos, finals)\n finals = self._three_sandhi(word, finals)\n return finals"
}
] | import os
import re
import cn2an
import sys
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from text.symbols import punctuation
from text.tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,610 |
sys.path.insert(0,"/data/stable-diffusion-tritonserver/Bert-VITS2")
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
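# Hedged sketch (an assumption, not part of this crop): rep_map is typically applied
# by a punctuation-normalization helper that builds a regex over its keys and
# substitutes every match, roughly:
#   pattern = re.compile("|".join(re.escape(k) for k in rep_map.keys()))
#   normalized_text = pattern.sub(lambda m: rep_map[m.group()], text)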
|
sys.path.insert(0,"/data/stable-diffusion-tritonserver/Bert-VITS2")
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
| tone_modifier = ToneSandhi() | 1 | 2023-12-18 09:53:41+00:00 | 12k |
sinoyou/nelf-pro | nerfstudio/engine/trainer.py | [
{
"identifier": "base_config",
"path": "nerfstudio/configs/base_config.py",
"snippet": "CONSOLE = Console(width=120)\nclass PrintableConfig: # pylint: disable=too-few-public-methods\nclass InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\nclass MachineConfig(PrintableConfig):\nclass LocalWriterConfig(InstantiateConfig):\nclass LoggingConfig(PrintableConfig):\nclass TrainerConfig(PrintableConfig):\nclass ViewerConfig(PrintableConfig):\nclass Config(PrintableConfig):\n def __str__(self):\n def setup(self, **kwargs) -> Any:\n def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any:\n def is_viewer_enabled(self) -> bool:\n def is_wandb_enabled(self) -> bool:\n def is_tensorboard_enabled(self) -> bool:\n def set_timestamp(self) -> None:\n def set_experiment_name(self) -> None:\n def get_base_dir(self) -> Path:\n def get_checkpoint_dir(self) -> Path:\n def print_to_terminal(self) -> None:\n def save_config(self) -> None:"
},
{
"identifier": "TrainingCallback",
"path": "nerfstudio/engine/callbacks.py",
"snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callbak (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int):\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation):\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)"
},
{
"identifier": "TrainingCallbackAttributes",
"path": "nerfstudio/engine/callbacks.py",
"snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"\n config: TrainerConfig\n \"\"\"the trainer config\"\"\""
},
{
"identifier": "TrainingCallbackLocation",
"path": "nerfstudio/engine/callbacks.py",
"snippet": "class TrainingCallbackLocation(Enum):\n \"\"\"Enum for specifying where the training callback should be run.\"\"\"\n\n BEFORE_TRAIN_ITERATION = auto()\n AFTER_TRAIN_ITERATION = auto()"
},
{
"identifier": "Optimizers",
"path": "nerfstudio/engine/optimizers.py",
"snippet": "class Optimizers:\n \"\"\"A set of optimizers.\n\n Args:\n config: The optimizer configuration object.\n param_groups: A dictionary of parameter groups to optimize.\n \"\"\"\n\n def __init__(self, config: Dict[str, Any], param_groups: Dict[str, List[Parameter]]):\n self.config = config\n self.optimizers = {}\n self.schedulers = {}\n for param_group_name, params in param_groups.items():\n lr_init = config[param_group_name][\"optimizer\"].lr\n self.optimizers[param_group_name] = config[param_group_name][\"optimizer\"].setup(params=params)\n if config[param_group_name][\"scheduler\"]:\n self.schedulers[param_group_name] = config[param_group_name][\"scheduler\"].setup(\n optimizer=self.optimizers[param_group_name], lr_init=lr_init\n )\n\n def optimizer_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding optimizer.\n\n Args:\n param_group_name: name of optimizer to step forward\n \"\"\"\n self.optimizers[param_group_name].step()\n\n def scheduler_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding scheduler.\n\n Args:\n param_group_name: name of scheduler to step forward\n \"\"\"\n if self.config.param_group_name.scheduler: # type: ignore\n self.schedulers[param_group_name].step()\n\n def zero_grad_all(self) -> None:\n \"\"\"Zero the gradients for all optimizer parameters.\"\"\"\n for _, optimizer in self.optimizers.items():\n optimizer.zero_grad()\n\n def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None:\n \"\"\"Take an optimizer step using a grad scaler.\n\n Args:\n grad_scaler: GradScaler to use\n \"\"\"\n for _, optimizer in self.optimizers.items():\n grad_scaler.step(optimizer)\n\n def optimizer_step_all(self):\n \"\"\"Run step for all optimizers.\"\"\"\n for _, optimizer in self.optimizers.items():\n # note that they key is the parameter name\n optimizer.step()\n\n def scheduler_step_all(self, step: int) -> None:\n \"\"\"Run step for all schedulers.\n\n Args:\n step: the current step\n \"\"\"\n for param_group_name, scheduler in self.schedulers.items():\n scheduler.step()\n # TODO(ethan): clean this up. why is there indexing into a list?\n lr = scheduler.get_last_lr()[0]\n writer.put_scalar(name=f\"learning_rate/{param_group_name}\", scalar=lr, step=step)\n\n def load_optimizers(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Helper to load the optimizer state from previous checkpoint\n\n Args:\n loaded_state: the state from the previous checkpoint\n \"\"\"\n for k, v in loaded_state.items():\n self.optimizers[k].load_state_dict(v)\n\n def load_schedulers(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Helper to load the schedulers state from previous checkpoint\n\n Args:\n loaded_state: the state from the previous checkpoint\n \"\"\"\n for k, v in loaded_state.items():\n self.schedulers[k].load_state_dict(v)"
},
{
"identifier": "setup_optimizers",
"path": "nerfstudio/engine/optimizers.py",
"snippet": "def setup_optimizers(config: base_config.Config, param_groups: Dict[str, List[Parameter]]) -> \"Optimizers\":\n \"\"\"Helper to set up the optimizers\n\n Args:\n config: The trainer configuration object.\n param_groups: A dictionary of parameter groups to optimize.\n\n Returns:\n The optimizers object.\n \"\"\"\n optimizer_config = config.optimizers.copy()\n\n # Add the camera optimizer if enabled.\n camera_optimizer_config = config.pipeline.datamanager.camera_optimizer\n if camera_optimizer_config.mode != \"off\":\n assert camera_optimizer_config.param_group not in optimizer_config\n optimizer_config[camera_optimizer_config.param_group] = {\n \"optimizer\": config.pipeline.datamanager.camera_optimizer.optimizer,\n \"scheduler\": config.pipeline.datamanager.camera_optimizer.scheduler,\n }\n return Optimizers(optimizer_config, param_groups)"
},
{
"identifier": "VanillaPipeline",
"path": "nerfstudio/pipelines/base_pipeline.py",
"snippet": "class VanillaPipeline(Pipeline):\n \"\"\"The pipeline class for the vanilla nerf setup of multiple cameras for one or a few scenes.\n\n config: configuration to instantiate pipeline\n device: location to place model and data\n test_mode:\n 'val': loads train/val datasets into memory\n 'test': loads train/test datset into memory\n 'inference': does not load any dataset into memory\n world_size: total number of machines available\n local_rank: rank of current machine\n\n Attributes:\n datamanager: The data manager that will be used\n model: The model that will be used\n \"\"\"\n\n def __init__(\n self,\n config: VanillaPipelineConfig,\n device: str,\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n load_step: int = None, \n ):\n super().__init__()\n self.config = config\n self.test_mode = test_mode\n self.datamanager: VanillaDataManager = config.datamanager.setup(\n device=device, test_mode=test_mode, world_size=world_size, local_rank=local_rank\n )\n self.datamanager.to(device)\n # TODO(ethan): get rid of scene_bounds from the model\n assert self.datamanager.train_dataset is not None, \"Missing input dataset\"\n\n self._model = config.model.setup(\n scene_box=self.datamanager.train_dataset.scene_box,\n num_train_data=len(self.datamanager.train_dataset),\n metadata=self.datamanager.train_dataset.metadata,\n world_size=world_size,\n local_rank=local_rank,\n load_step=load_step,\n )\n self.model.to(device)\n\n self.world_size = world_size\n if world_size > 1:\n self._model = typing.cast(Model, DDP(self._model, device_ids=[local_rank], find_unused_parameters=True))\n dist.barrier(device_ids=[local_rank])\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.model.device\n\n @profiler.time_function\n def get_train_loss_dict(self, step: int):\n \"\"\"This function gets your training loss dict. This will be responsible for\n getting the next batch of data from the DataManager and interfacing with the\n Model class, feeding the data to the model's forward function.\n\n Args:\n step: current iteration step to update sampler if using DDP (distributed)\n \"\"\"\n ray_bundle, batch = self.datamanager.next_train(step)\n model_outputs = self._model(ray_bundle)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n\n camera_opt_param_group = self.config.datamanager.camera_optimizer.param_group\n if camera_opt_param_group in self.datamanager.get_param_groups():\n # Report the camera optimization metrics\n metrics_dict[\"camera_opt_translation\"] = (\n self.datamanager.get_param_groups()[camera_opt_param_group][0].data[:, :3].norm()\n )\n metrics_dict[\"camera_opt_rotation\"] = (\n self.datamanager.get_param_groups()[camera_opt_param_group][0].data[:, 3:].norm()\n )\n\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n\n return model_outputs, loss_dict, metrics_dict\n\n def forward(self):\n \"\"\"Blank forward method\n\n This is an nn.Module, and so requires a forward() method normally, although in our case\n we do not need a forward() method\"\"\"\n raise NotImplementedError\n\n def get_scene_plotly_figure(self) -> plotly.graph_objects.Figure:\n \"\"\"\n This function utilizes plotly utils to create a plotly figure of the scene. 
\n \"\"\"\n\n train_out = self.datamanager.get_train_parser_output()\n eval_out = self.datamanager.get_eval_parser_output()\n\n if train_out.other_data.get(\"scene_plotly\") is None:\n return None\n \n plotly_scene_data = []\n\n plotly_scene_data += train_out.other_data['scene_plotly']\n plotly_scene_data += eval_out.other_data['scene_plotly']\n\n return plotly_scene_data\n\n @profiler.time_function\n def get_eval_loss_dict(self, step: int):\n \"\"\"This function gets your evaluation loss dict. It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n self.eval()\n ray_bundle, batch = self.datamanager.next_eval(step)\n model_outputs = self.model(ray_bundle)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n self.train()\n return model_outputs, loss_dict, metrics_dict\n\n @profiler.time_function\n def get_eval_image_metrics_and_images(self, step: int):\n \"\"\"This function gets your evaluation loss dict. It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n self.eval()\n image_idx, camera_ray_bundle, batch = self.datamanager.next_eval_image(step)\n outputs = self.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)\n metrics_dict, images_dict = self.model.get_image_metrics_and_images(outputs, batch)\n assert \"image_idx\" not in metrics_dict\n metrics_dict[\"image_idx\"] = image_idx\n assert \"num_rays\" not in metrics_dict\n metrics_dict[\"num_rays\"] = len(camera_ray_bundle)\n self.train()\n return metrics_dict, images_dict\n\n @profiler.time_function\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n \"\"\"Iterate over all the images in the eval dataset and get the average.\n\n Returns:\n metrics_dict: dictionary of metrics\n \"\"\"\n self.eval()\n metrics_dict_list = []\n images_dict_list = []\n num_images = len(self.datamanager.fixed_indices_eval_dataloader)\n with Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TimeElapsedColumn(),\n MofNCompleteColumn(),\n transient=True,\n ) as progress:\n task = progress.add_task(\"[green]Evaluating all eval images...\", total=num_images)\n for camera_ray_bundle, batch in self.datamanager.fixed_indices_eval_dataloader:\n isbasicimages = False\n if isinstance(\n batch[\"image\"], BasicImages\n ): # If this is a generalized dataset, we need to get image tensor\n isbasicimages = True\n batch[\"image\"] = batch[\"image\"].images[0]\n camera_ray_bundle = camera_ray_bundle.reshape((*batch[\"image\"].shape[:-1],))\n # time this the following line\n inner_start = time()\n height, width = camera_ray_bundle.shape\n num_rays = height * width\n outputs = self.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)\n metrics_dict, images_dict = self.model.get_image_metrics_and_images(outputs, batch)\n for k in images_dict.keys():\n images_dict[k] = images_dict[k].cpu()\n metrics_dict['image_filename'] = batch['image_filename']\n images_dict['image_filename'] = batch['image_filename']\n assert \"num_rays_per_sec\" not in metrics_dict\n metrics_dict[\"num_rays_per_sec\"] = num_rays / (time() - inner_start)\n fps_str = \"fps\"\n assert fps_str not in metrics_dict\n metrics_dict[fps_str] = metrics_dict[\"num_rays_per_sec\"] / (height * width)\n metrics_dict_list.append(metrics_dict)\n images_dict_list.append(images_dict)\n 
progress.advance(task)\n \n # average the metrics list\n metrics_dict = {}\n for key in metrics_dict_list[0].keys():\n if isinstance(metrics_dict_list[0][key], (float, int, torch.Tensor)):\n metrics_dict[key] = float(\n torch.mean(torch.tensor([metrics_dict[key] for metrics_dict in metrics_dict_list]))\n )\n\n self.train()\n return metrics_dict, (metrics_dict_list, images_dict_list)\n\n def load_pipeline(self, loaded_state: Dict[str, Any], load_step, checkpoint_dir) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: pre-trained model state dict\n \"\"\"\n self.model.customized_load(load_step, checkpoint_dir)\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state.items()}\n if self.test_mode == \"inference\":\n state.pop(\"datamanager.train_camera_optimizer.pose_adjustment\", None)\n state.pop(\"datamanager.train_ray_generator.image_coords\", None)\n state.pop(\"datamanager.train_ray_generator.pose_optimizer.pose_adjustment\", None)\n state.pop(\"datamanager.eval_ray_generator.image_coords\", None)\n state.pop(\"datamanager.eval_ray_generator.pose_optimizer.pose_adjustment\", None)\n # always pops out the buffer parts\n buffer_fields_list = []\n for k in state.keys():\n if k.split('.')[-1] == \"buffer_fields\":\n buffer_fields_list.append(k)\n if len(buffer_fields_list) > 0:\n print(f\"replace buffer fields with initial state\")\n print(buffer_fields_list)\n new_state = self.state_dict()\n for b in buffer_fields_list:\n state[b] = new_state[b]\n self.load_state_dict(state) # type: ignore\n \n def call_customized_save(self, step: int, checkpoint_dir) -> None:\n \"\"\"Call customized save function for the pipeline.\n\n Args:\n step: current iteration step\n training_config: training configuration\n \"\"\"\n self.model.customized_save(step, checkpoint_dir)\n\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n \"\"\"Returns the training callbacks from both the Dataloader and the Model.\"\"\"\n datamanager_callbacks = self.datamanager.get_training_callbacks(training_callback_attributes)\n model_callbacks = self.model.get_training_callbacks(training_callback_attributes)\n callbacks = datamanager_callbacks + model_callbacks\n return callbacks\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Get the param groups for the pipeline.\n\n Returns:\n A list of dictionaries containing the pipeline's param groups.\n \"\"\"\n datamanager_params = self.datamanager.get_param_groups()\n model_params = self.model.get_param_groups()\n # TODO(ethan): assert that key names don't overlap\n return {**datamanager_params, **model_params}"
},
{
"identifier": "profiler",
"path": "nerfstudio/utils/profiler.py",
"snippet": "CONSOLE = Console(width=120)\nPROFILER = []\ndef time_function(func: Callable) -> Callable:\n def wrapper(*args, **kwargs):\ndef flush_profiler(config: cfg.LoggingConfig):\ndef setup_profiler(config: cfg.LoggingConfig):\n def __init__(self, config: cfg.LoggingConfig):\n def update_time(self, func_name: str, start_time: float, end_time: float):\n def print_profile(self):\nclass Profiler:"
},
{
"identifier": "writer",
"path": "nerfstudio/utils/writer.py",
"snippet": "CONSOLE = Console(width=120)\nEVENT_WRITERS = []\nEVENT_STORAGE = []\nGLOBAL_BUFFER = {}\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n IMAGE = \"write_image\"\n PLOTLY = \"write_plotly\"\n SCALAR = \"write_scalar\"\n DICT = \"write_scalar_dict\"\n CONFIG = \"write_config\"\nclass EventName(enum.Enum):\nclass EventType(enum.Enum):\nclass Writer:\nclass TimeWriter:\nclass WandbWriter(Writer):\nclass TensorboardWriter(Writer):\nclass LocalWriter:\ndef put_image(name, image: TensorType[\"H\", \"W\", \"C\"], step: int):\ndef put_plotly(name: str, figure: Any, step: int = 0):\ndef put_scalar(name: str, scalar: Any, step: int):\ndef put_dict(name: str, scalar_dict: Dict[str, Any], step: int):\ndef put_config(name: str, config_dict: Dict[str, Any], step: int):\ndef put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False):\ndef write_out_storage():\ndef setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None:\ndef setup_event_writer(config: cfg.Config, log_dir: Path) -> None:\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_scalar_dict(self, name: str, scalar_dict: Dict[str, Any], step: int) -> None:\n def __init__(self, writer, name, step=None, write=True):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, log_dir: Path, experiment_name: str):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def __init__(self, log_dir: Path):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int): # pylint: disable=unused-argument\ndef _cursorup(x: int):\ndef _format_time(seconds):\n def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Optional[List[str]] = None):\n def write_stats_log(self, step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def _consolidate_events(self):\n def _update_header(self, latest_map, new_key):\n def _print_stats(self, latest_map, padding=\" \"):"
},
{
"identifier": "check_eval_enabled",
"path": "nerfstudio/utils/decorators.py",
"snippet": "def check_eval_enabled(func: Callable) -> Callable:\n \"\"\"Decorator: check if evaluation step is enabled\"\"\"\n\n def wrapper(self, *args, **kwargs):\n ret = None\n if self.config.is_wandb_enabled() or self.config.is_tensorboard_enabled():\n ret = func(self, *args, **kwargs)\n return ret\n\n return wrapper"
},
{
"identifier": "check_main_thread",
"path": "nerfstudio/utils/decorators.py",
"snippet": "def check_main_thread(func: Callable) -> Callable:\n \"\"\"Decorator: check if you are on main thread\"\"\"\n\n def wrapper(*args, **kwargs):\n ret = None\n if comms.is_main_process():\n ret = func(*args, **kwargs)\n return ret\n\n return wrapper"
},
{
"identifier": "check_viewer_enabled",
"path": "nerfstudio/utils/decorators.py",
"snippet": "def check_viewer_enabled(func: Callable) -> Callable:\n \"\"\"Decorator: check if viewer is enabled and only run on main process\"\"\"\n\n def wrapper(self, *args, **kwargs):\n ret = None\n if self.config.is_viewer_enabled() and comms.is_main_process():\n ret = func(self, *args, **kwargs)\n return ret\n\n return wrapper"
},
{
"identifier": "step_check",
"path": "nerfstudio/utils/misc.py",
"snippet": "def step_check(step, step_size, run_at_zero=False) -> bool:\n \"\"\"Returns true based on current step and step interval.\"\"\"\n if step_size == 0:\n return False\n return (run_at_zero or step != 0) and step % step_size == 0"
},
{
"identifier": "EventName",
"path": "nerfstudio/utils/writer.py",
"snippet": "class EventName(enum.Enum):\n \"\"\"Names of possible events that can be logged via Local Writer for convenience.\n see config/logging/default_logging.yaml\"\"\"\n\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\""
},
{
"identifier": "TimeWriter",
"path": "nerfstudio/utils/writer.py",
"snippet": "class TimeWriter:\n \"\"\"Timer context manager that calculates duration around wrapped functions\"\"\"\n\n def __init__(self, writer, name, step=None, write=True):\n self.writer = writer\n self.name = name\n self.step = step\n self.write = write\n\n self.start: float = 0.0\n self.duration: float = 0.0\n\n def __enter__(self):\n torch.cuda.synchronize()\n self.start = time()\n return self\n\n def __exit__(self, *args):\n torch.cuda.synchronize()\n self.duration = time() - self.start\n update_step = self.step is not None\n if self.write:\n self.writer.put_time(\n name=self.name,\n duration=self.duration,\n step=self.step if update_step else GLOBAL_BUFFER[\"max_iter\"],\n avg_over_steps=update_step,\n update_eta=self.name == EventName.ITER_TRAIN_TIME,\n )"
},
{
"identifier": "viewer_utils",
"path": "nerfstudio/viewer/server/viewer_utils.py",
"snippet": "CONSOLE = Console(width=120)\n INIT = \"init\"\n RGB = \"rgb\"\n RGB_FINE = \"rgb_fine\"\n ACCUMULATION = \"accumulation\"\n ACCUMULATION_FINE = \"accumulation_fine\"\n INIT = \"init\"\n DEFAULT = \"default\"\n TURBO = \"turbo\"\n DEPTH = \"depth\"\n SEMANTIC = \"semantic\"\n BOOLEAN = \"boolean\"\ndef get_viewer_version() -> str:\ndef setup_viewer(config: cfg.ViewerConfig, log_filename: Path):\n def __init__(self, func):\n def __enter__(self):\n def __exit__(self, ext_type, exc_value, traceback):\n def __init__(self, state: \"ViewerState\", graph: Model, camera_ray_bundle: RayBundle):\n def run(self):\n def join(self, timeout=None):\n def __init__(self, state):\n def run(self):\n def __init__(self, config: cfg.ViewerConfig, log_filename: Path):\n def _pick_drawn_image_idxs(self, total_num: int) -> list[int]:\n def init_scene(self, dataset: InputDataset, start_train=True) -> None:\n def _check_camera_path_payload(self, trainer, step: int):\n def update_scene(self, trainer, step: int, graph: Model, num_rays_per_batch: int) -> None:\n def check_interrupt(self, frame, event, arg): # pylint: disable=unused-argument\n def _get_camera_object(self):\n def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):\n def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):\n def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None:\n def _calculate_image_res(self, camera_object, is_training: bool) -> Optional[Tuple[int, int]]:\n def _process_invalid_output(self, output_type: str) -> str:\n def _render_image_in_viewer(self, camera_object, graph: Model, is_training: bool) -> None:\nclass OutputTypes(str, enum.Enum):\nclass ColormapTypes(str, enum.Enum):\nclass IOChangeException(Exception):\nclass SetTrace:\nclass RenderThread(threading.Thread):\nclass CheckThread(threading.Thread):\nclass ViewerState:"
},
{
"identifier": "check_load_step",
"path": "nerfstudio/utils/load_utils.py",
"snippet": "def check_load_step(config: Config):\n \"\"\"pre check whether load_dir exists and return load_step is specified. \"\"\"\n load_dir = config.trainer.load_dir\n if load_dir is None:\n load_dir_try = config.get_checkpoint_dir()\n if load_dir_try.exists():\n load_dir = load_dir_try\n if load_dir is not None:\n load_step = config.trainer.load_step\n if load_step is None:\n # NOTE: this is specific to the checkpoint name format\n load_step = sorted(int(x.replace('-', '.').split('.')[-2]) for x in os.listdir(load_dir))[-1]\n return load_step\n else:\n return None"
}
] | import dataclasses
import functools
import os
import time
import json
import torch
import plotly.graph_objects as go
from typing import Dict, List, Tuple
from numpy import isin
from pathlib import Path
from rich.console import Console
from torch.cuda.amp.grad_scaler import GradScaler
from typing_extensions import Literal
from nerfstudio.configs import base_config as cfg
from nerfstudio.engine.callbacks import (
TrainingCallback,
TrainingCallbackAttributes,
TrainingCallbackLocation,
)
from nerfstudio.engine.optimizers import Optimizers, setup_optimizers
from nerfstudio.pipelines.base_pipeline import VanillaPipeline
from nerfstudio.utils import profiler, writer
from nerfstudio.utils.decorators import (
check_eval_enabled,
check_main_thread,
check_viewer_enabled,
)
from nerfstudio.utils.misc import step_check
from nerfstudio.utils.writer import EventName, TimeWriter
from nerfstudio.viewer.server import viewer_utils
from nerfstudio.utils.load_utils import check_load_step | 10,195 | try:
self.viewer_state.update_scene(self, step, self.pipeline.model, num_rays_per_batch)
except RuntimeError:
time.sleep(0.03) # sleep to allow buffer to reset
assert self.viewer_state.vis is not None
self.viewer_state.vis["renderingState/log_errors"].write(
"Error: GPU out of memory. Reduce resolution to prevent viewer from crashing."
)
@check_viewer_enabled
def _update_viewer_rays_per_sec(self, train_t: TimeWriter, vis_t: TimeWriter, step: int):
"""Performs update on rays/sec calclation for training
Args:
train_t: timer object carrying time to execute total training iteration
vis_t: timer object carrying time to execute visualization step
step: current step
"""
train_num_rays_per_batch = self.config.pipeline.datamanager.train_num_rays_per_batch
writer.put_time(
name=EventName.TRAIN_RAYS_PER_SEC,
duration=train_num_rays_per_batch / (train_t.duration - vis_t.duration),
step=step,
avg_over_steps=True,
)
def _load_checkpoint(self) -> None:
"""Helper function to load pipeline and optimizer from prespecified checkpoint"""
load_dir = self.config.trainer.load_dir
# try to find checkpoint dir
if load_dir is None:
load_dir_try = self.config.get_checkpoint_dir()
if load_dir_try.exists():
load_dir = load_dir_try
if load_dir is not None:
load_step = self.config.trainer.load_step
if load_step is None:
print("Loading latest checkpoint from load_dir")
# NOTE: this is specific to the checkpoint name format
# load_step = sorted(int(x[x.find("-") + 1 : x.find(".")]) for x in os.listdir(load_dir))[-1]
load_step = sorted(int(x.replace('-', '.').split('.')[-2]) for x in os.listdir(load_dir))[-1]
load_path = Path(load_dir) / Path(f"model.{load_step:09d}.ckpt")
if not load_path.exists():
load_path = Path(load_dir) / Path(f'step-{load_step:09d}.ckpt') # old format
assert load_path.exists(), f"Checkpoint {load_path} does not exist"
loaded_state = torch.load(load_path, map_location="cpu")
self._start_step = loaded_state["step"] + 1
            # load the checkpoints for pipeline, optimizers, and gradient scaler
self.pipeline.load_pipeline(loaded_state["pipeline"], load_step, load_dir)
self.optimizers.load_optimizers(loaded_state["optimizers"])
if "schedulers" in loaded_state and self.config.trainer.load_scheduler:
self.optimizers.load_schedulers(loaded_state["schedulers"])
self.grad_scaler.load_state_dict(loaded_state["scalers"])
CONSOLE.print(f"done loading checkpoint from {load_path}, starting from step {self._start_step}")
else:
CONSOLE.print("No checkpoints to load, training from scratch")
@check_main_thread
def save_checkpoint(self, step: int) -> None:
"""Save the model and optimizers
Args:
step: number of steps in training for given checkpoint
"""
# possibly make the checkpoint directory
if not self.checkpoint_dir.exists():
self.checkpoint_dir.mkdir(parents=True, exist_ok=True)
# save the checkpoint
ckpt_path = self.checkpoint_dir / f"model.{step:09d}.ckpt"
torch.save(
{
"step": step,
"pipeline": self.pipeline.module.state_dict() # type: ignore
if hasattr(self.pipeline, "module")
else self.pipeline.state_dict(),
"optimizers": {k: v.state_dict() for (k, v) in self.optimizers.optimizers.items()},
"schedulers": {k: v.state_dict() for (k, v) in self.optimizers.schedulers.items()},
"scalers": self.grad_scaler.state_dict(),
},
ckpt_path,
)
self.pipeline.call_customized_save(step=step, checkpoint_dir=self.checkpoint_dir)
# possibly delete old checkpoints
if self.config.trainer.save_only_latest_checkpoint:
# delete everything else in the checkpoint folder
for f in self.checkpoint_dir.glob("*"):
if int(str(f).split('.')[-2]) != step:
f.unlink()
@profiler.time_function
def train_iteration(self, step: int) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
"""Run one iteration with a batch of inputs. Returns dictionary of model losses.
Args:
step: Current training step.
"""
self.optimizers.zero_grad_all()
cpu_or_cuda_str = self.device.split(":")[0]
with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision):
_, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step)
loss = functools.reduce(torch.add, loss_dict.values())
self.grad_scaler.scale(loss).backward() # type: ignore
# try:
# torch.nn.utils.clip_grad_norm_(self.pipeline.model.parameters(), 10.0, error_if_nonfinite=True)
# # torch.nn.utils.clip_grad_value_(self.pipeline.model.parameters(), 10.0)
# except Exception as e:
# CONSOLE.print(f"Error: {e}")
# CONSOLE.print("Error: gradient clipping detected nonfinite number, skipping updating. ")
# self.optimizers.scheduler_step_all(step)
# self.optimizers.zero_grad_all()
# return loss, loss_dict, metrics_dict
self.optimizers.optimizer_scaler_step_all(self.grad_scaler)
self.grad_scaler.update()
self.optimizers.scheduler_step_all(step)
        # Return the total loss along with the individual loss terms and metrics.
return loss, loss_dict, metrics_dict
| # zinyou note:
# The trainer, in principle, should not be modified.
# Modifications should be made only within the pipeline or lower levels: the model and the data manager.
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code to train model.
"""
from __future__ import annotations
CONSOLE = Console(width=120)
class Trainer:
"""Trainer class
Args:
config: The configuration object.
local_rank: Local rank of the process.
world_size: World size of the process.
Attributes:
config: The configuration object.
local_rank: Local rank of the process.
world_size: World size of the process.
device: The device to run the training on.
pipeline: The pipeline object.
optimizers: The optimizers object.
callbacks: The callbacks object.
"""
pipeline: VanillaPipeline
optimizers: Optimizers
callbacks: List[TrainingCallback]
def __init__(self, config: cfg.Config, local_rank: int = 0, world_size: int = 1):
self.config = config
self.local_rank = local_rank
self.world_size = world_size
self.device = "cpu" if world_size == 0 else f"cuda:{local_rank}"
self.mixed_precision = self.config.trainer.mixed_precision
if self.device == "cpu":
self.mixed_precision = False
CONSOLE.print("Mixed precision is disabled for CPU training.")
self._start_step = 0
# optimizers
self.grad_scaler = GradScaler(enabled=self.mixed_precision)
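        # Note: GradScaler scales the loss before backward() so that small float16
        # gradients do not underflow, and unscales them again before each optimizer
        # step; with enabled=False (e.g. CPU training) it is effectively a no-op.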
self.base_dir = config.get_base_dir()
# directory to save checkpoints
self.checkpoint_dir = config.get_checkpoint_dir()
CONSOLE.log(f"Saving checkpoints to: {self.checkpoint_dir}")
# set up viewer if enabled
viewer_log_path = self.base_dir / config.viewer.relative_log_filename
self.viewer_state, banner_messages = None, None
if self.config.is_viewer_enabled() and local_rank == 0:
self.viewer_state, banner_messages = viewer_utils.setup_viewer(config.viewer, log_filename=viewer_log_path)
self._check_viewer_warnings()
# set up writers/profilers if enabled
writer_log_path = self.base_dir / config.logging.relative_log_dir
writer.setup_event_writer(config, log_dir=writer_log_path)
writer.setup_local_writer(
config.logging, max_iter=config.trainer.max_num_iterations, banner_messages=banner_messages
)
writer.put_config(name="config", config_dict=dataclasses.asdict(config), step=0)
profiler.setup_profiler(config.logging)
def setup(self, test_mode: Literal["test", "val", "inference"] = "val"):
"""Setup the Trainer by calling other setup functions.
Args:
test_mode:
'val': loads train/val datasets into memory
                'test': loads train/test dataset into memory
'inference': does not load any dataset into memory
"""
self.pipeline = self.config.pipeline.setup(
device=self.device, test_mode=test_mode, world_size=self.world_size, local_rank=self.local_rank, load_step = check_load_step(self.config),
)
self.optimizers = setup_optimizers(self.config, self.pipeline.get_param_groups())
self._load_checkpoint()
self.training_attributes = TrainingCallbackAttributes(
optimizers=self.optimizers, # type: ignore
grad_scaler=self.grad_scaler, # type: ignore
pipeline=self.pipeline, # type: ignore
config=self.config.trainer, # type: ignore
)
self.callbacks = self.pipeline.get_training_callbacks(self.training_attributes)
def train(self) -> None:
"""Train the model."""
        assert self.pipeline.datamanager.train_dataset is not None, "Missing DatasetInputs"
self._init_viewer_state()
# plotly scene
if self.config.trainer.visualize_scene:
scene_plotly_data = self.pipeline.get_scene_plotly_figure()
if scene_plotly_data:
fig = go.Figure(data=scene_plotly_data)
writer.put_plotly(name="scene", figure=fig)
CONSOLE.log("Scene plotly is uploaded.")
self.training_time = 0.0
with TimeWriter(writer, EventName.TOTAL_TRAIN_TIME):
num_iterations = self.config.trainer.max_num_iterations
step = self._start_step
self._update_viewer_state(step)
for step in range(self._start_step, num_iterations):
with TimeWriter(writer, EventName.ITER_TRAIN_TIME, step=step) as train_t:
self.pipeline.train()
# training callbacks before the training iteration
for callback in self.callbacks:
callback.run_callback_at_location(
step, location=TrainingCallbackLocation.BEFORE_TRAIN_ITERATION
)
start_time = time.time()
                    # time the full training iteration (forward, backward, and optimizer step)
loss, loss_dict, metrics_dict = self.train_iteration(step)
self.training_time += time.time() - start_time
# training callbacks after the training iteration
for callback in self.callbacks:
callback.run_callback_at_location(step, location=TrainingCallbackLocation.AFTER_TRAIN_ITERATION)
# Skip the first two steps to avoid skewed timings that break the viewer rendering speed estimate.
if step > 1:
writer.put_time(
name=EventName.TRAIN_RAYS_PER_SEC,
duration=self.config.pipeline.datamanager.train_num_rays_per_batch / train_t.duration,
step=step,
avg_over_steps=True,
)
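                    # The rays/sec logged above is the configured rays-per-batch divided
                    # by the wall-clock duration of the whole iteration block timed by
                    # train_t (training callbacks included).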
self._update_viewer_state(step)
                # periodically log training statistics for a batch of train rays
if step_check(step, self.config.logging.steps_per_log, run_at_zero=True):
writer.put_scalar(name="Train Loss", scalar=loss, step=step)
writer.put_dict(name="Train Loss Dict", scalar_dict=loss_dict, step=step)
writer.put_dict(name="Train Metrics Dict", scalar_dict=metrics_dict, step=step)
if step_check(step, self.config.trainer.steps_per_save):
self.save_checkpoint(step)
self.eval_iteration(step)
writer.write_out_storage()
# save checkpoint at the end of training
self.save_checkpoint(step)
self.save_running_performance()
CONSOLE.rule()
CONSOLE.print("[bold green]:tada: :tada: :tada: Training Finished :tada: :tada: :tada:", justify="center")
if not self.config.viewer.quit_on_train_completion:
CONSOLE.print("Use ctrl+c to quit", justify="center")
self._always_render(step)
@check_main_thread
def _always_render(self, step):
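        # Keeps the viewer alive once training has finished: repeatedly flags
        # training as stopped and re-renders the scene until the process is
        # interrupted (e.g. with ctrl+c).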
if self.config.is_viewer_enabled():
while True:
self.viewer_state.vis["renderingState/isTraining"].write(False)
self._update_viewer_state(step)
@check_main_thread
def _check_viewer_warnings(self) -> None:
"""Helper to print out any warnings regarding the way the viewer/loggers are enabled"""
if self.config.is_viewer_enabled():
string = (
"[NOTE] Not running eval iterations since only viewer is enabled."
" Use [yellow]--vis wandb[/yellow] or [yellow]--vis tensorboard[/yellow] to run with eval instead."
)
CONSOLE.print(f"{string}")
@check_main_thread
def save_running_performance(self):
output_path = self.checkpoint_dir / f"../running_performance.json"
performance_info = {
"n_parameters": self.pipeline.model.n_parameters() / 1024 / 1024,
"training_time": self.training_time,
}
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(json.dumps(performance_info, indent=2), "utf8")
CONSOLE.log(f'model parameters: {performance_info["n_parameters"]}, training time: {performance_info["training_time"]}')
@check_viewer_enabled
def _init_viewer_state(self) -> None:
"""Initializes viewer scene with given train dataset"""
assert self.viewer_state and self.pipeline.datamanager.train_dataset
self.viewer_state.init_scene(
dataset=self.pipeline.datamanager.train_dataset,
start_train=self.config.viewer.start_train,
)
if not self.config.viewer.start_train:
self._always_render(self._start_step)
@check_viewer_enabled
def _update_viewer_state(self, step: int):
"""Updates the viewer state by rendering out scene with current pipeline
Returns the time taken to render scene.
Args:
step: current train step
"""
assert self.viewer_state is not None
with TimeWriter(writer, EventName.ITER_VIS_TIME, step=step) as _:
num_rays_per_batch = self.config.pipeline.datamanager.train_num_rays_per_batch
try:
self.viewer_state.update_scene(self, step, self.pipeline.model, num_rays_per_batch)
except RuntimeError:
time.sleep(0.03) # sleep to allow buffer to reset
assert self.viewer_state.vis is not None
self.viewer_state.vis["renderingState/log_errors"].write(
"Error: GPU out of memory. Reduce resolution to prevent viewer from crashing."
)
@check_viewer_enabled
def _update_viewer_rays_per_sec(self, train_t: TimeWriter, vis_t: TimeWriter, step: int):
"""Performs update on rays/sec calclation for training
Args:
train_t: timer object carrying time to execute total training iteration
vis_t: timer object carrying time to execute visualization step
step: current step
"""
train_num_rays_per_batch = self.config.pipeline.datamanager.train_num_rays_per_batch
writer.put_time(
name=EventName.TRAIN_RAYS_PER_SEC,
duration=train_num_rays_per_batch / (train_t.duration - vis_t.duration),
step=step,
avg_over_steps=True,
)
def _load_checkpoint(self) -> None:
"""Helper function to load pipeline and optimizer from prespecified checkpoint"""
load_dir = self.config.trainer.load_dir
# try to find checkpoint dir
if load_dir is None:
load_dir_try = self.config.get_checkpoint_dir()
if load_dir_try.exists():
load_dir = load_dir_try
if load_dir is not None:
load_step = self.config.trainer.load_step
if load_step is None:
print("Loading latest checkpoint from load_dir")
# NOTE: this is specific to the checkpoint name format
# load_step = sorted(int(x[x.find("-") + 1 : x.find(".")]) for x in os.listdir(load_dir))[-1]
load_step = sorted(int(x.replace('-', '.').split('.')[-2]) for x in os.listdir(load_dir))[-1]
load_path = Path(load_dir) / Path(f"model.{load_step:09d}.ckpt")
if not load_path.exists():
load_path = Path(load_dir) / Path(f'step-{load_step:09d}.ckpt') # old format
assert load_path.exists(), f"Checkpoint {load_path} does not exist"
loaded_state = torch.load(load_path, map_location="cpu")
self._start_step = loaded_state["step"] + 1
            # load the checkpoints for pipeline, optimizers, and gradient scaler
self.pipeline.load_pipeline(loaded_state["pipeline"], load_step, load_dir)
self.optimizers.load_optimizers(loaded_state["optimizers"])
if "schedulers" in loaded_state and self.config.trainer.load_scheduler:
self.optimizers.load_schedulers(loaded_state["schedulers"])
self.grad_scaler.load_state_dict(loaded_state["scalers"])
CONSOLE.print(f"done loading checkpoint from {load_path}, starting from step {self._start_step}")
else:
CONSOLE.print("No checkpoints to load, training from scratch")
@check_main_thread
def save_checkpoint(self, step: int) -> None:
"""Save the model and optimizers
Args:
step: number of steps in training for given checkpoint
"""
# possibly make the checkpoint directory
if not self.checkpoint_dir.exists():
self.checkpoint_dir.mkdir(parents=True, exist_ok=True)
# save the checkpoint
ckpt_path = self.checkpoint_dir / f"model.{step:09d}.ckpt"
torch.save(
{
"step": step,
"pipeline": self.pipeline.module.state_dict() # type: ignore
if hasattr(self.pipeline, "module")
else self.pipeline.state_dict(),
"optimizers": {k: v.state_dict() for (k, v) in self.optimizers.optimizers.items()},
"schedulers": {k: v.state_dict() for (k, v) in self.optimizers.schedulers.items()},
"scalers": self.grad_scaler.state_dict(),
},
ckpt_path,
)
self.pipeline.call_customized_save(step=step, checkpoint_dir=self.checkpoint_dir)
# possibly delete old checkpoints
if self.config.trainer.save_only_latest_checkpoint:
# delete everything else in the checkpoint folder
for f in self.checkpoint_dir.glob("*"):
if int(str(f).split('.')[-2]) != step:
f.unlink()
@profiler.time_function
def train_iteration(self, step: int) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
"""Run one iteration with a batch of inputs. Returns dictionary of model losses.
Args:
step: Current training step.
"""
self.optimizers.zero_grad_all()
cpu_or_cuda_str = self.device.split(":")[0]
with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision):
_, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step)
loss = functools.reduce(torch.add, loss_dict.values())
self.grad_scaler.scale(loss).backward() # type: ignore
# try:
# torch.nn.utils.clip_grad_norm_(self.pipeline.model.parameters(), 10.0, error_if_nonfinite=True)
# # torch.nn.utils.clip_grad_value_(self.pipeline.model.parameters(), 10.0)
# except Exception as e:
# CONSOLE.print(f"Error: {e}")
# CONSOLE.print("Error: gradient clipping detected nonfinite number, skipping updating. ")
# self.optimizers.scheduler_step_all(step)
# self.optimizers.zero_grad_all()
# return loss, loss_dict, metrics_dict
self.optimizers.optimizer_scaler_step_all(self.grad_scaler)
self.grad_scaler.update()
self.optimizers.scheduler_step_all(step)
        # Return the total loss along with the individual loss terms and metrics.
return loss, loss_dict, metrics_dict
| @check_eval_enabled | 9 | 2023-12-15 20:07:22+00:00 | 12k |
Infleqtion/qLDPC | qldpc/codes.py | [
{
"identifier": "abstract",
"path": "qldpc/abstract.py",
"snippet": "DEFAULT_FIELD_ORDER = 2\nclass GroupMember(comb.Permutation):\nclass Group:\nclass Element:\nclass Protograph:\nclass TrivialGroup(Group):\nclass CyclicGroup(Group):\nclass DihedralGroup(Group):\nclass QuaternionGroup(Group):\n def __mul__(self, other: UnknownType) -> UnknownType:\n def __add__(self, other: UnknownType) -> UnknownType:\n def __lt__(self, other: GroupMember) -> bool:\n def __matmul__(self, other: GroupMember) -> GroupMember:\ndef default_lift(member: GroupMember) -> IntegerArray:\n def __init__(\n self, group: PermutationGroup, field: int | None = None, lift: Lift | None = None\n ) -> None:\n def __eq__(self, other: object) -> bool:\n def __mul__(self, other: Group) -> Group:\n def lift(member: GroupMember) -> galois.FieldArray:\n def __contains__(self, member: GroupMember) -> bool:\n def field(self) -> type[galois.FieldArray]:\n def order(self) -> int:\n def generators(self) -> Sequence[GroupMember]:\n def generate(self) -> Iterator[GroupMember]:\n def identity(self) -> GroupMember:\n def product(cls, *groups: Group, repeat: int = 1) -> Group:\n def lift(self, member: GroupMember) -> galois.FieldArray:\n def lift_dim(self) -> int:\n def table(self) -> IntegerArray:\n def from_table(\n cls,\n table: IntegerArray | Sequence[Sequence[int]],\n field: int | None = None,\n integer_lift: IntegerLift | None = None,\n ) -> Group:\n def lift(member: GroupMember) -> IntegerArray:\n def from_generators(\n cls, *generators: GroupMember, field: int | None = None, lift: Lift | None = None\n ) -> Group:\n def __init__(self, group: Group, *members: GroupMember):\n def __eq__(self, other: object) -> bool:\n def __iter__(self) -> Iterator[tuple[GroupMember, galois.FieldArray]]:\n def __add__(self, other: GroupMember | Element) -> Element:\n def __radd__(self, other: GroupMember) -> Element:\n def __mul__(self, other: int | GroupMember | Element) -> Element:\n def __rmul__(self, other: int | GroupMember) -> Element:\n def __neg__(self) -> Element:\n def __pow__(self, power: int) -> Element:\n def copy(self) -> Element:\n def field(self) -> type[galois.FieldArray]:\n def group(self) -> Group:\n def lift(self) -> galois.FieldArray:\n def zero(self) -> Element:\n def one(self) -> Element:\n def T(self) -> Element:\n def __init__(self, matrix: Protograph | ObjectMatrix) -> None:\n def __eq__(self, other: object) -> bool:\n def __rmul__(self, val: int) -> Protograph:\n def __mul__(self, val: int) -> Protograph:\n def matrix(self) -> npt.NDArray[np.object_]:\n def shape(self) -> tuple[int, ...]:\n def group(self) -> Group:\n def field(self) -> type[galois.FieldArray]:\n def lift(self) -> galois.FieldArray:\n def T(self) -> Protograph:\n def build(cls, group: Group, matrix: ObjectMatrix, *, field: int = 2) -> Protograph:\n def __init__(self, field: int | None = None) -> None:\n def to_protograph(\n cls, matrix: IntegerArray | Sequence[Sequence[int]], field: int | None = None\n ) -> Protograph:\n def __init__(self, order: int) -> None:\n def __init__(self, order: int) -> None:\n def __init__(self) -> None:\n def lift(member: int) -> IntegerArray:"
},
{
"identifier": "CayleyComplex",
"path": "qldpc/objects.py",
"snippet": "class CayleyComplex:\n \"\"\"Left-right Cayley complex, used for constructing quantum Tanner codes.\n\n A Cayley complex is a geometric structure built out of a two subsets A and B of a group G. The\n subsets respectively act on elements of G from the left and right, and must be symmetric, which\n is to say (for example) that a ∈ A iff a^-1 ∈ A. To avoid constructing a complex that factors\n into disconnected pieces, we can define G as the group generated by all elements of A and B.\n\n The generating data (A,B) is used to build vertices V, edges E, and faces F as follows:\n - vertices are members of G,\n - edges have the form (g, ag) and (g, gb), and\n - faces f(g,a,b) have the form {g, ab, gb, agb}:\n\n g → gb\n ↓ ↓\n ag → agb\n\n The complex (V,E,F) is in turn used to construct two bipartite directed graphs:\n - subgraph_0 with edges ( g, f(g,a,b)), and\n - subgraph_1 with edges (ag, f(g,a,b)).\n These graphs are used to construct classical Tanner codes that serve as the X and Z sectors of a\n quantum CSS code (namely, a quantum Tanner code).\n\n There are, however, two complications to keep in mind. First, in order for the faces to be non\n degenerate (that is, for each face to contain four vertices), the generating data (A,B) must\n satisfy the Total No Conjugacy condition:\n\n [1] ag != gb for all g,a,b in (G,A,B).\n\n Second, in order to construct a valid quantum Tanner code out of subgraph_0 and subgraph_1, the\n graph (V,E) must be bipartite, V = V_0 ∪ V_1, such that (for example) nodes {g,agb} are in one\n partition, while nodes {ag,gb} are in the other partition. The nodes V_i are then used as the\n sources of subgraph_i. The graph (V,E) is bipartite if:\n\n [2] The Cayley graphs (G;A) and (G;B) both are bipartite.\n\n The Cayley graphs (G;A) and (G;B) are graphs whose\n - vertices are members of G, and\n - edges are pairs of vertices connected by A or B, as in (g, ag) or (g, gb).\n\n If both [1] and [2] are satisfied, when we can construct a Cayley complex out of (G,A,B)\n directly, which we call a \"rank-0\" complex.\n\n If [1] is satisfied but [2] is not, then we can construct a \"rank-1\" complex that enforces\n requirement [2] by taking the double cover of G and modifying members of A and B as:\n - G --> G ⊗ {0,1},\n - a --> (a,1), and\n - b --> (b,1),\n where (a,1) acts on (g,i) as (a,1) * (g,i) = (ag,i+1), and similarly (b,1) * (g,i) = (gb,i+1).\n\n If requirement [1] is not satisfied, then we can construct a \"rank-2\" complex that enforces both\n [1] and [2] by taking the quadruple cover of G and modifying members of A and B as:\n - G --> G ⊗ {0,1} ⊗ {0,1},\n - a --> (a,1,0), and\n - b --> (b,0,1),\n where similarly to before (a,1,0) * (g,i,j) = (ag,i+1,j) and (b,0,1) * (g,i,j) = (gb,i,j+1).\n\n References:\n - https://arxiv.org/abs/2202.13641\n - https://arxiv.org/abs/2206.07571\n - https://www.youtube.com/watch?v=orWcstqWGGo\n \"\"\"\n\n # generating data\n subset_a: set[abstract.GroupMember]\n subset_b: set[abstract.GroupMember]\n group: abstract.Group\n\n # rank and graph (vertices and edges)\n rank: int\n graph: nx.Graph\n faces: set[frozenset[abstract.GroupMember]]\n\n # subgraphs used for a quantum Tanner code\n subgraph_0: nx.DiGraph\n subgraph_1: nx.DiGraph\n\n def __init__(\n self,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember] | None = None,\n *,\n rank: int | None = None,\n ) -> None:\n \"\"\"Construct a left-right Cayley complex.\"\"\"\n assert not rank or 0 <= rank <= 2\n if subset_b is None:\n 
subset_b = subset_a\n subset_a = set(subset_a)\n subset_b = set(subset_b)\n assert all(~member in subset_a for member in subset_a)\n assert all(~member in subset_b for member in subset_b)\n\n # identify the group generated by the provided (sub)sets\n group = abstract.Group.from_generators(*subset_a, *subset_b)\n\n # determine the rank of this complex\n min_rank = CayleyComplex.get_min_rank(group, subset_a, subset_b)\n if rank is not None and rank < min_rank:\n error = f\"Cannot set CayleyComplex rank to {rank} (min_rank: {min_rank})\"\n raise ValueError(error)\n self.rank = min_rank if rank is None else rank\n\n # take the double cover(s) of the group, if necessary, and save the generating data\n identity, shift = abstract.CyclicGroup(2).generate()\n if self.rank == 2:\n shift_a = shift @ identity\n shift_b = identity @ shift\n elif self.rank == 1:\n shift_a = shift_b = shift\n else: # self.rank == 0\n shift_a = shift_b = abstract.TrivialGroup().identity\n self.subset_a = set(aa @ shift_a for aa in subset_a)\n self.subset_b = set(bb @ shift_b for bb in subset_b)\n self.group = abstract.Group.from_generators(*self.subset_a, *self.subset_b)\n\n # construct the vertices, edges, and faces of this complex\n self.graph = nx.Graph()\n self.faces = set()\n for gg, aa, bb in itertools.product(self.group.generate(), self.subset_a, self.subset_b):\n aa_gg, gg_bb, aa_gg_bb = aa * gg, gg * bb, aa * gg * bb\n face = frozenset([gg, aa_gg, gg_bb, aa_gg_bb])\n self.faces.add(face)\n self.graph.add_edge(gg, aa_gg)\n self.graph.add_edge(gg, gg_bb)\n self.graph.add_edge(aa_gg, aa_gg_bb)\n self.graph.add_edge(gg_bb, aa_gg_bb)\n\n # construct the subgraphs of the complex\n self.subgraph_0 = nx.DiGraph()\n self.subgraph_1 = nx.DiGraph()\n half_group, _ = nx.bipartite.sets(self.graph)\n for gg, aa, bb in itertools.product(half_group, self.subset_a, self.subset_b):\n aa_gg, gg_bb, aa_gg_bb = aa * gg, gg * bb, aa * gg * bb\n face = frozenset([gg, aa_gg, gg_bb, aa_gg_bb])\n self.subgraph_0.add_edge(gg, face, sort=(aa, bb))\n self.subgraph_1.add_edge(aa_gg, face, sort=(~aa, bb))\n\n @classmethod\n def get_min_rank(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> Literal[0, 1, 2]:\n \"\"\"Minimum rank of a Cayley complex built out of the given generating data.\"\"\"\n if not CayleyComplex.satisfies_total_no_conjugacy(group, subset_a, subset_b):\n return 2\n graph_a, graph_b = CayleyComplex.get_cayley_graphs(group, subset_a, subset_b)\n if not nx.is_bipartite(graph_a) or not nx.is_bipartite(graph_b):\n return 1\n return 0\n\n @classmethod\n def satisfies_total_no_conjugacy(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> bool:\n \"\"\"Check the Total No-Conjugacy condition: aa gg != gg bb for all gg, aa, bb.\"\"\"\n return all(\n aa * gg != gg * bb\n for gg, aa, bb in itertools.product(group.generate(), subset_a, subset_b)\n )\n\n @classmethod\n def get_cayley_graphs(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> tuple[nx.Graph, nx.Graph]:\n \"\"\"Cayley graphs for the left- and right-acting subsets.\"\"\"\n edges_a = [(gg, aa * gg) for gg in group.generate() for aa in subset_a]\n edges_b = [(gg, gg * bb) for gg in group.generate() for bb in subset_b]\n return nx.Graph(edges_a), nx.Graph(edges_b)"
},
{
"identifier": "Node",
"path": "qldpc/objects.py",
"snippet": "class Node:\n \"\"\"Node in a Tanner graph.\n\n A node essentially an integer index, together with a boolean flag to distinguish \"data\" node\n from a \"check\" node in an error-correcting code.\n \"\"\"\n\n index: int\n is_data: bool = True\n\n def __hash__(self) -> int:\n return hash((self.index, self.is_data))\n\n def __lt__(self, other: Node) -> bool:\n if self.is_data == other.is_data:\n return self.index < other.index\n return self.is_data # data bits \"precede\" check bits\n\n def __str__(self) -> str:\n tag = \"d\" if self.is_data else \"c\"\n return f\"{tag}_{self.index}\""
},
{
"identifier": "Pauli",
"path": "qldpc/objects.py",
"snippet": "class Pauli(enum.Enum):\n \"\"\"Pauli operators.\"\"\"\n\n I = (0, 0) # noqa: E741\n Z = (0, 1)\n X = (1, 0)\n Y = (1, 1)\n\n def __mul__(self, other: Pauli) -> Pauli:\n \"\"\"Product of two Pauli operators.\"\"\"\n val_x = (self.value[0] + other.value[0]) % 2\n val_z = (self.value[1] + other.value[1]) % 2\n return Pauli((val_x, val_z))\n\n def __invert__(self) -> Pauli:\n \"\"\"Hadamard-transform this Pauli operator.\"\"\"\n return Pauli(self.value[::-1])\n\n def __str__(self) -> str:\n if self == Pauli.I:\n return \"I\"\n elif self == Pauli.Z:\n return \"Z\"\n elif self == Pauli.X:\n return \"X\"\n return \"Y\"\n\n @classmethod\n def from_string(cls, string: str) -> Pauli:\n \"\"\"Build a Pauli operator from a string.\"\"\"\n if string == \"I\":\n return Pauli.I\n elif string == \"Z\":\n return Pauli.Z\n elif string == \"X\":\n return Pauli.X\n elif string == \"Y\":\n return Pauli.Y\n raise ValueError(f\"Invalid Pauli operator: {string}\")\n\n @property\n def index(self) -> int:\n \"\"\"Numerical index for Pauli operators.\"\"\"\n if self == Pauli.X:\n return 0\n if self == Pauli.Z:\n return 1\n raise AttributeError(f\"No index for {self}.\")"
},
{
"identifier": "QuditOperator",
"path": "qldpc/objects.py",
"snippet": "class QuditOperator:\n \"\"\"A qudit operator of the form X(val_x)*Z(val_z).\"\"\"\n\n def __init__(self, value: tuple[int, int] = (0, 0)) -> None:\n self.value = value\n\n def __eq__(self, other: object) -> bool:\n return isinstance(other, QuditOperator) and self.value == other.value\n\n def __invert__(self) -> QuditOperator:\n \"\"\"Fourier-transform this qudit operator.\"\"\"\n return QuditOperator(self.value[::-1])\n\n def __str__(self) -> str:\n val_x, val_z = self.value\n if not val_x and not val_z:\n return \"I\"\n if val_x == val_z:\n return f\"Y({val_z})\"\n ops = []\n if val_x:\n ops.append(f\"X({val_x})\")\n if val_z:\n ops.append(f\"Z({val_z})\")\n return \"*\".join(ops)\n\n @classmethod\n def from_string(cls, string: str) -> QuditOperator:\n \"\"\"Build a qudit operator from its string representation.\"\"\"\n if string == \"I\":\n return QuditOperator((0, 0))\n\n invalid_op = f\"Invalid qudit operator: {string}\"\n\n val_x, val_z = 0, 0\n factors = string.split(\"*\")\n if len(factors) > 2:\n raise ValueError(invalid_op)\n\n for factor in factors:\n pauli = factor[0]\n val_str = factor[2:-1]\n _factor = f\"{pauli}({val_str})\"\n if pauli not in \"XYZ\" or not val_str.isnumeric() or factor != _factor:\n raise ValueError(invalid_op)\n\n val = int(val_str)\n if pauli == \"X\":\n val_x = val\n elif pauli == \"Z\":\n val_z = val\n else: # pauli == \"Y\"\n val_x = val_z = val\n\n return QuditOperator((val_x, val_z))"
}
] | import abc
import functools
import itertools
import cachetools
import galois
import ldpc.mod2
import networkx as nx
import numpy as np
import numpy.typing as npt
import qldpc
from collections.abc import Collection, Iterable, Sequence
from typing import TYPE_CHECKING, Literal
from qldpc import abstract
from qldpc.objects import CayleyComplex, Node, Pauli, QuditOperator
from typing_extensions import Self | 7,861 | matrix = np.zeros((num_bits - 1, num_bits), dtype=int)
for row in range(num_bits - 1):
matrix[row, row] = 1
matrix[row, row + 1] = minus_one
return ClassicalCode(matrix, field)
@classmethod
def ring(cls, num_bits: int, field: int | None = None) -> ClassicalCode:
"""Construct a repetition code with periodic boundary conditions."""
minus_one = galois.GF(field or DEFAULT_FIELD_ORDER).characteristic - 1
matrix = np.zeros((num_bits, num_bits), dtype=int)
for row in range(num_bits):
matrix[row, row] = 1
matrix[row, (row + 1) % num_bits] = minus_one
return ClassicalCode(matrix, field)
@classmethod
def hamming(cls, rank: int, field: int | None = None) -> ClassicalCode:
"""Construct a hamming code of a given rank."""
field = field or DEFAULT_FIELD_ORDER
if field == 2:
# parity check matrix: columns = all nonzero bitstrings
bitstrings = list(itertools.product([0, 1], repeat=rank))
return ClassicalCode(np.array(bitstrings[1:]).T)
# More generally, columns = maximal set of nonzero, linearly independent strings.
# This is achieved by collecting together all strings whose first nonzero element is a 1.
strings = [
(0,) * top_row + (1,) + rest
for top_row in range(rank - 1, -1, -1)
for rest in itertools.product(range(field), repeat=rank - top_row - 1)
]
return ClassicalCode(np.array(strings).T, field=field)
# TODO: add more codes, particularly from code families that are useful for good quantum codes
# see https://mhostetter.github.io/galois/latest/api/#forward-error-correction
# TODO:
# - add method to convert a parity check matrix into standard form
# - see https://arxiv.org/abs/1101.1519
# - one method to compute "blocks" of standard form, one to return the matrix itself
# - add is_CSS method to figure out whether this is a CSS Code
# - see https://quantumcomputing.stackexchange.com/questions/15432/
# - also compute and store sub-codes, if CSS
# - also add QuditCode.to_CSS() -> CSSCode
class QuditCode(AbstractCode):
"""Quantum stabilizer code for Galois qudits, with dimension q = p^m for prime p and integer m.
The parity check matrix of a QuditCode has dimensions (num_checks, 2 * num_qudits), and can be
written as a block matrix in the form H = [H_x|H_z]. Each block has num_qudits columns.
The entries H_x[c, d] = r_x and H_z[c, d] = r_z iff check c addresses qudit d with the operator
X(r_x) * Z(r_z), where r_x, r_z range over the base field, and X(r), Z(r) are generalized Pauli
operators. Specifically:
- X(r) = sum_{j=0}^{q-1} |j+r><j| is a shift operator, and
- Z(r) = sum_{j=0}^{q-1} w^{j r} |j><j| is a phase operator, with w = exp(2 pi i / q).
    Warning: here j, r, s, etc. are not integers, but elements of the Galois field GF(q), which has
different rules for addition and multiplication when q is not a prime number.
Helpful lecture by Gottesman: https://www.youtube.com/watch?v=JWg4zrNAF-g
"""
@property
def num_checks(self) -> int:
"""Number of parity checks (stabilizers) in this code."""
return self.matrix.shape[0]
@property
def num_qudits(self) -> int:
"""Number of data qudits in this code."""
return self.matrix.shape[1] // 2
@property
def num_qubits(self) -> int:
"""Number of data qubits in this code."""
self._assert_qubit_code()
return self.num_qudits
def _assert_qubit_code(self) -> None:
if self._field_order != 2:
raise ValueError("Attempted to call a qubit-only method with a non-qubit code.")
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
graph = nx.DiGraph()
matrix = np.reshape(matrix, (len(matrix), 2, -1))
for row, col_xz, col in zip(*np.nonzero(matrix)):
node_check = Node(index=int(row), is_data=False)
node_qudit = Node(index=int(col), is_data=True)
graph.add_edge(node_check, node_qudit)
qudit_op = graph[node_check][node_qudit].get(QuditOperator, QuditOperator())
vals_xz = list(qudit_op.value)
vals_xz[col_xz] += int(matrix[row, col_xz, col])
graph[node_check][node_qudit][QuditOperator] = QuditOperator(tuple(vals_xz))
if isinstance(matrix, galois.FieldArray):
graph.order = type(matrix).order
return graph
@classmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
num_qudits = sum(1 for node in graph.nodes() if node.is_data)
num_checks = len(graph.nodes()) - num_qudits
matrix = np.zeros((num_checks, 2, num_qudits), dtype=int)
for node_check, node_qudit, data in graph.edges(data=True):
matrix[node_check.index, :, node_qudit.index] = data[QuditOperator].value
field = graph.order if hasattr(graph, "order") else DEFAULT_FIELD_ORDER
return galois.GF(field)(matrix.reshape(num_checks, 2 * num_qudits))
def get_stabilizers(self) -> list[str]:
"""Stabilizers (checks) of this code, represented by strings."""
matrix = self.matrix.reshape(self.num_checks, 2, self.num_qudits)
stabilizers = []
for check in range(self.num_checks):
ops = []
for qudit in range(self.num_qudits):
| """Error correction code constructions
Copyright 2023 The qLDPC Authors and Infleqtion Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
if TYPE_CHECKING:
DEFAULT_FIELD_ORDER = abstract.DEFAULT_FIELD_ORDER
################################################################################
# template error correction code classes
class AbstractCode(abc.ABC):
"""Template class for error-correcting codes."""
_field_order: int
def __init__(
self,
matrix: Self | npt.NDArray[np.int_] | Sequence[Sequence[int]],
field: int | None = None,
) -> None:
"""Construct a code from a parity check matrix over a finite field.
The base field is taken to be F_2 by default.
"""
self._matrix: galois.FieldArray
if isinstance(matrix, type(self)):
self._field_order = matrix.field.order
if not (field is None or field == self._field_order):
raise ValueError(
f"Field argument {field} is inconsistent with the given code, which is defined"
f" over F_{self._field_order}"
)
self._matrix = matrix.matrix
elif isinstance(matrix, galois.FieldArray):
self._field_order = type(matrix).order
self._matrix = matrix
else:
self._field_order = field or DEFAULT_FIELD_ORDER
self._matrix = self.field(np.array(matrix))
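        # Accepted inputs: another code of the same type (its matrix and field are
        # reused), a galois FieldArray (field inferred from its type), or a plain
        # integer array / nested sequence interpreted over `field` (default F_2).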
@property
def field(self) -> type[galois.FieldArray]:
"""Base field over which this code is defined."""
return galois.GF(self._field_order)
@property
def matrix(self) -> galois.FieldArray:
"""Parity check matrix of this code."""
return self._matrix
@functools.cached_property
def graph(self) -> nx.DiGraph:
"""Tanner graph of this code."""
return self.matrix_to_graph(self.matrix)
@classmethod
@abc.abstractmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
@classmethod
@abc.abstractmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
class ClassicalCode(AbstractCode):
"""Classical linear error-correcting code over a finite field F_q.
    A classical code C = {x} is a set of vectors x (with entries in F_q) called code words.
    We consider only linear codes, for which any linear combination of code words is also a code word.
Operationally, we define a classical code by a parity check matrix H with dimensions
(num_checks, num_bits). Each row of H represents a linear constraint (a "check") that code
words must satisfy. A vector x is a code word iff H @ x = 0.
"""
def __contains__(self, word: npt.NDArray[np.int_] | Sequence[int]) -> bool:
return not np.any(self.matrix @ self.field(word))
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix H into a Tanner graph.
The Tanner graph is a bipartite graph with (num_checks, num_bits) vertices, respectively
identified with the checks and bits of the code. The check vertex c and the bit vertex b
share an edge iff c addresses b; that is, edge (c, b) is in the graph iff H[c, b] != 0.
"""
graph = nx.DiGraph()
for row, col in zip(*np.nonzero(matrix)):
node_c = Node(index=int(row), is_data=False)
node_d = Node(index=int(col), is_data=True)
graph.add_edge(node_c, node_d, val=matrix[row][col])
if isinstance(matrix, galois.FieldArray):
graph.order = type(matrix).order
return graph
@classmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
num_bits = sum(1 for node in graph.nodes() if node.is_data)
num_checks = len(graph.nodes()) - num_bits
field = graph.order if hasattr(graph, "order") else DEFAULT_FIELD_ORDER
matrix = galois.GF(field).Zeros((num_checks, num_bits))
for node_c, node_b, data in graph.edges(data=True):
matrix[node_c.index, node_b.index] = data.get("val", 1)
return matrix
@functools.cached_property
def generator(self) -> galois.FieldArray:
"""Generator of this code: a matrix whose rows for a basis for code words."""
return self.matrix.null_space()
def words(self) -> galois.FieldArray:
"""Code words of this code."""
vectors = itertools.product(self.field.elements, repeat=self.generator.shape[0])
return self.field(list(vectors)) @ self.generator
def get_random_word(self) -> galois.FieldArray:
"""Random code word: a sum all generators with random field coefficients."""
return self.field.Random(self.generator.shape[0]) @ self.generator
def dual(self) -> ClassicalCode:
"""Dual to this code.
        The dual code ~C is the set of vectors orthogonal to C:
~C = { x : x @ y = 0 for all y in C }.
The parity check matrix of ~C is equal to the generator of C.
"""
return ClassicalCode(self.generator, self._field_order)
def __invert__(self) -> ClassicalCode:
return self.dual()
@classmethod
def tensor_product(cls, code_a: ClassicalCode, code_b: ClassicalCode) -> ClassicalCode:
"""Tensor product C_a ⊗ C_b of two codes C_a and C_b.
        Let G_a and G_b respectively denote the generators of C_a and C_b.
Definition: C_a ⊗ C_b is the code whose generators are G_a ⊗ G_b.
Observation: G_a ⊗ G_b is the check matrix of ~(C_a ⊗ C_b).
We therefore construct ~(C_a ⊗ C_b) and return its dual ~~(C_a ⊗ C_b) = C_a ⊗ C_b.
"""
if not code_a._field_order == code_b._field_order:
raise ValueError("Cannot take tensor product of codes over different fields")
gen_a: npt.NDArray[np.int_] = code_a.generator
gen_b: npt.NDArray[np.int_] = code_b.generator
return ~ClassicalCode(np.kron(gen_a, gen_b))
@property
def num_checks(self) -> int:
"""Number of check bits in this code."""
return self._matrix.shape[0]
@property
def num_bits(self) -> int:
"""Number of data bits in this code."""
return self._matrix.shape[1]
@functools.cached_property
def rank(self) -> int:
"""Rank of this code's parity check matrix.
Equivalently, the number of linearly independent parity checks in this code.
"""
if self._field_order == 2:
return ldpc.mod2.rank(self._matrix)
return np.linalg.matrix_rank(self._matrix)
@property
def dimension(self) -> int:
"""The number of logical bits encoded by this code."""
return self.num_bits - self.rank
@functools.cache
def get_distance(self) -> int:
"""The distance of this code, or equivalently the minimal weight of a nonzero code word."""
words = self.words().view(np.ndarray)
return np.min(np.count_nonzero(words[1:], axis=1))
def get_code_params(self) -> tuple[int, int, int]:
"""Compute the parameters of this code: [n,k,d].
Here:
- n is the number of data bits
- k is the number of encoded ("logical") bits
- d is the code distance
"""
return self.num_bits, self.dimension, self.get_distance()
@classmethod
def random(cls, bits: int, checks: int, field: int | None = None) -> ClassicalCode:
"""Construct a random classical code with the given number of bits and nontrivial checks."""
if field is None:
field = DEFAULT_FIELD_ORDER
code_field = galois.GF(field)
rows, cols = checks, bits
matrix = code_field.Random((rows, cols))
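        # The loops below re-sample one entry in any all-zero row or column, so that
        # every check involves at least one bit and every bit appears in at least
        # one check.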
for row in range(matrix.shape[0]):
if not matrix[row, :].any():
matrix[row, np.random.randint(cols)] = code_field.Random(low=1) # pragma: no cover
for col in range(matrix.shape[1]):
if not matrix[:, col].any():
matrix[np.random.randint(rows), col] = code_field.Random(low=1) # pragma: no cover
return ClassicalCode(matrix, field)
@classmethod
def repetition(cls, num_bits: int, field: int | None = None) -> ClassicalCode:
"""Construct a repetition code on the given number of bits."""
minus_one = galois.GF(field or DEFAULT_FIELD_ORDER).characteristic - 1
matrix = np.zeros((num_bits - 1, num_bits), dtype=int)
for row in range(num_bits - 1):
matrix[row, row] = 1
matrix[row, row + 1] = minus_one
return ClassicalCode(matrix, field)
@classmethod
def ring(cls, num_bits: int, field: int | None = None) -> ClassicalCode:
"""Construct a repetition code with periodic boundary conditions."""
minus_one = galois.GF(field or DEFAULT_FIELD_ORDER).characteristic - 1
matrix = np.zeros((num_bits, num_bits), dtype=int)
for row in range(num_bits):
matrix[row, row] = 1
matrix[row, (row + 1) % num_bits] = minus_one
return ClassicalCode(matrix, field)
@classmethod
def hamming(cls, rank: int, field: int | None = None) -> ClassicalCode:
"""Construct a hamming code of a given rank."""
field = field or DEFAULT_FIELD_ORDER
if field == 2:
# parity check matrix: columns = all nonzero bitstrings
bitstrings = list(itertools.product([0, 1], repeat=rank))
return ClassicalCode(np.array(bitstrings[1:]).T)
# More generally, columns = maximal set of nonzero, linearly independent strings.
# This is achieved by collecting together all strings whose first nonzero element is a 1.
strings = [
(0,) * top_row + (1,) + rest
for top_row in range(rank - 1, -1, -1)
for rest in itertools.product(range(field), repeat=rank - top_row - 1)
]
return ClassicalCode(np.array(strings).T, field=field)
# TODO: add more codes, particularly from code families that are useful for good quantum codes
# see https://mhostetter.github.io/galois/latest/api/#forward-error-correction
# TODO:
# - add method to convert a parity check matrix into standard form
# - see https://arxiv.org/abs/1101.1519
# - one method to compute "blocks" of standard form, one to return the matrix itself
# - add is_CSS method to figure out whether this is a CSS Code
# - see https://quantumcomputing.stackexchange.com/questions/15432/
# - also compute and store sub-codes, if CSS
# - also add QuditCode.to_CSS() -> CSSCode
class QuditCode(AbstractCode):
"""Quantum stabilizer code for Galois qudits, with dimension q = p^m for prime p and integer m.
The parity check matrix of a QuditCode has dimensions (num_checks, 2 * num_qudits), and can be
written as a block matrix in the form H = [H_x|H_z]. Each block has num_qudits columns.
The entries H_x[c, d] = r_x and H_z[c, d] = r_z iff check c addresses qudit d with the operator
X(r_x) * Z(r_z), where r_x, r_z range over the base field, and X(r), Z(r) are generalized Pauli
operators. Specifically:
- X(r) = sum_{j=0}^{q-1} |j+r><j| is a shift operator, and
- Z(r) = sum_{j=0}^{q-1} w^{j r} |j><j| is a phase operator, with w = exp(2 pi i / q).
Warning: here j, r, s, etc. are not integers, but elements of the Galois field GF(q), which has
different rules for addition and multiplication when q is not a prime number.
Helpful lecture by Gottesman: https://www.youtube.com/watch?v=JWg4zrNAF-g
"""
@property
def num_checks(self) -> int:
"""Number of parity checks (stabilizers) in this code."""
return self.matrix.shape[0]
@property
def num_qudits(self) -> int:
"""Number of data qudits in this code."""
return self.matrix.shape[1] // 2
@property
def num_qubits(self) -> int:
"""Number of data qubits in this code."""
self._assert_qubit_code()
return self.num_qudits
def _assert_qubit_code(self) -> None:
if self._field_order != 2:
raise ValueError("Attempted to call a qubit-only method with a non-qubit code.")
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
graph = nx.DiGraph()
matrix = np.reshape(matrix, (len(matrix), 2, -1))
for row, col_xz, col in zip(*np.nonzero(matrix)):
node_check = Node(index=int(row), is_data=False)
node_qudit = Node(index=int(col), is_data=True)
graph.add_edge(node_check, node_qudit)
qudit_op = graph[node_check][node_qudit].get(QuditOperator, QuditOperator())
vals_xz = list(qudit_op.value)
vals_xz[col_xz] += int(matrix[row, col_xz, col])
graph[node_check][node_qudit][QuditOperator] = QuditOperator(tuple(vals_xz))
if isinstance(matrix, galois.FieldArray):
graph.order = type(matrix).order
return graph
@classmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
num_qudits = sum(1 for node in graph.nodes() if node.is_data)
num_checks = len(graph.nodes()) - num_qudits
matrix = np.zeros((num_checks, 2, num_qudits), dtype=int)
for node_check, node_qudit, data in graph.edges(data=True):
matrix[node_check.index, :, node_qudit.index] = data[QuditOperator].value
field = graph.order if hasattr(graph, "order") else DEFAULT_FIELD_ORDER
return galois.GF(field)(matrix.reshape(num_checks, 2 * num_qudits))
def get_stabilizers(self) -> list[str]:
"""Stabilizers (checks) of this code, represented by strings."""
matrix = self.matrix.reshape(self.num_checks, 2, self.num_qudits)
stabilizers = []
for check in range(self.num_checks):
ops = []
for qudit in range(self.num_qudits): | val_x = matrix[check, Pauli.X.index, qudit] | 3 | 2023-12-19 22:29:42+00:00 | 12k |
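The `ClassicalCode.repetition` snippet above encodes each parity check as x_i - x_{i+1} = 0 between neighbouring bits. Below is a minimal standalone sketch of the same construction over GF(2) using plain numpy; the helper name `repetition_parity_matrix` is illustrative and not part of the repository.

# Sketch (assumes numpy only): repetition-code parity checks over GF(2),
# where "minus one" is simply 1.
import numpy as np

def repetition_parity_matrix(num_bits: int) -> np.ndarray:
    """Each row checks that two neighbouring bits agree (x_i + x_{i+1} = 0 mod 2)."""
    matrix = np.zeros((num_bits - 1, num_bits), dtype=int)
    for row in range(num_bits - 1):
        matrix[row, row] = 1
        matrix[row, row + 1] = 1
    return matrix

if __name__ == "__main__":
    H = repetition_parity_matrix(4)
    print(H)
    # [[1 1 0 0]
    #  [0 1 1 0]
    #  [0 0 1 1]]
    # The repetition code on n bits has parameters [n, k, d] = [n, 1, n]:
    # n data bits, one logical bit, and distance n.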
amazon-science/c2f-seg | src/video_model.py | [
{
"identifier": "VQModel",
"path": "taming_src/taming_models.py",
"snippet": "class VQModel(nn.Module):\n def __init__(self, config):\n super(VQModel, self).__init__()\n self.config = config\n self.iteration = 0\n self.name = config.model_type\n self.m_path = os.path.join(config.path, self.name)\n self.eps = 1e-6\n\n self.ddconfig = config.model['params']['ddconfig']\n n_embed = config.model['params']['n_embed']\n embed_dim = config.model['params']['embed_dim']\n \n self.encoder = Encoder(self.ddconfig).to(config.device)\n self.decoder = Decoder(self.ddconfig).to(config.device)\n self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25).to(config.device).to(config.device)\n self.quant_conv = torch.nn.Conv2d(self.ddconfig[\"z_channels\"], embed_dim, 1).to(config.device)\n # self.quant_proj = torch.nn.Linear(self.ddconfig[\"z_channels\"], embed_dim).to(config.device)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, self.ddconfig[\"z_channels\"], 1).to(config.device)\n # self.pose_quant_proj = torch.nn.Linear(embed_dim, self.ddconfig[\"z_channels\"]).to(config.device)\n\n def encode(self, x, mask=None):\n h = self.encoder(x) # dim=256\n h = self.quant_conv(h) # dim=256\n if mask is not None:\n mask = F.max_pool2d(mask, kernel_size=int(mask.shape[2] / h.shape[2]),\n stride=int(mask.shape[2] / h.shape[2]))\n quant = quant * mask + h * (1 - mask)\n quant, emb_loss, info = self.quantize(h, mask)\n \n return quant, emb_loss, info\n\n def decode(self, quant):\n quant = self.post_quant_conv(quant) # dim: 256\n dec = self.decoder(quant)\n return dec\n\n def decode_code(self, code_b):\n quant_b = self.quantize.embed_code(code_b)\n dec = self.decode(quant_b)\n return dec\n\n def forward(self, x, mask=None):\n quant, diff, _ = self.encode(x, mask) # quant dim: 256\n\n dec = self.decode(quant)\n return dec, diff\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n def restore(self, ckpt_file, g_opt=None, d_opt=None):\n torch_init_model(self, ckpt_file, \"state_dict\")\n saving = torch.load(ckpt_file, map_location='cpu')\n if 'optimizer_states' in saving and g_opt is not None and d_opt is not None:\n opt_state = saving['optimizer_states']\n g_opt.load_state_dict(opt_state[0])\n d_opt.load_state_dict(opt_state[1])\n print(f\"Restored from {ckpt_file}\")\n return g_opt, d_opt\n\n def save(self, prefix=None, g_opt=None, d_opt=None):\n if prefix is not None:\n save_path = self.m_path + \"_{}.pth\".format(prefix)\n else:\n save_path = self.m_path + \".pth\"\n\n print('\\nsaving {} {}...\\n'.format(self.name, prefix))\n all_saving = {'state_dict': self.state_dict(),\n 'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}\n torch.save(all_saving, save_path)"
},
{
"identifier": "MaskedTransformer",
"path": "src/video_component.py",
"snippet": "class MaskedTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n embedding_dim = config.n_embd\n num_embed = config.vocab_size+1\n self.conv_in = torch.nn.Conv2d(2048, embedding_dim//2, 3, padding=1)\n # z_embedding\n self.c_emb = nn.Embedding(num_embed, embedding_dim//4)\n self.z_emb = nn.Embedding(num_embed, embedding_dim//4)\n # posotion embedding\n self.pos_emb = nn.Embedding(config.sequence_length, embedding_dim)\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])\n # decoder head\n self.dec = Transformer_Prediction(config)\n # z dec and m dec\n self.m_dec = nn.Linear(embedding_dim, num_embed, bias=False)\n self.m_bias = nn.Parameter(torch.zeros(num_embed))\n\n self.sequence_length = config.sequence_length\n self.apply(self._init_weights)\n self.config = config\n self.window_len = int(self.config.window_length)\n\n def forward(self, img_feat, c_idx, z_idx, window_size=(12, 4, 4), mask=None):\n # img_feat: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n i_embeddings = self.conv_in(img_feat) # [B, 768//2-1, 16, 16]\n i_embeddings = i_embeddings.flatten(2).transpose(-2, -1)\n # c and z embedding\n c_embeddings = self.c_emb(c_idx) # [B, 256, D//4]\n z_embeddings = self.z_emb(z_idx) # [B, 256, D//4]\n token_embeddings = torch.cat([i_embeddings, c_embeddings, z_embeddings], dim=2) # [B, 256, D]\n # add positional embeddings\n n_tokens = token_embeddings.shape[1] # 16 * 16\n position_ids = torch.arange(n_tokens, dtype=torch.long, device=z_idx.device)\n position_ids = position_ids.unsqueeze(0).repeat(z_idx.shape[0], 1) # [B, 256, 1]\n position_embeddings = self.pos_emb(position_ids) # [B, 256, D]\n\n x = self.drop(token_embeddings + position_embeddings)\n\n batch_size = token_embeddings.shape[0]\n mask = torch.ones(batch_size, 1, n_tokens, n_tokens).cuda()\n window_size = (self.window_len, 4, 4)\n\n for block in self.blocks:\n x = block(x, window_size=window_size, mask=mask)\n x = torch.roll(x, self.window_len//2, 0)\n\n total_shift_size = (self.window_len//2) * len(self.blocks)\n x = torch.roll(x, batch_size - total_shift_size%batch_size, 0)\n\n x = self.dec(x)\n logits_m = self.m_dec(x) + self.m_bias\n \n return logits_m\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)"
},
{
"identifier": "Resnet_Encoder",
"path": "src/video_component.py",
"snippet": "class Resnet_Encoder(nn.Module):\n def __init__(self):\n super(Resnet_Encoder, self).__init__()\n self.encoder = base_resnet()\n\n def forward(self, img):\n features = self.encoder(img)\n return features"
},
{
"identifier": "Refine_Module",
"path": "src/video_component.py",
"snippet": "class Refine_Module(nn.Module):\n def __init__(self):\n super(Refine_Module, self).__init__()\n # self.encoder = base_resnet()\n dim = 256 + 2\n self.conv_adapter = torch.nn.Conv2d(2048, 2048, 1)\n self.conv_in = torch.nn.Conv2d(2048, 256, 3, padding=1)\n self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)\n self.bn1 = torch.nn.BatchNorm2d(dim)\n\n self.lay2 = torch.nn.Conv2d(dim, 128, 3, padding=1)\n self.bn2 = torch.nn.BatchNorm2d(128)\n\n self.lay3 = torch.nn.Conv2d(128, 64, 3, padding=1)\n self.bn3 = torch.nn.BatchNorm2d(64)\n self.adapter1 = torch.nn.Conv2d(1024, 128, 1)\n\n # visible mask branch\n self.lay4_vm = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_vm = torch.nn.BatchNorm2d(32)\n self.lay5_vm = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_vm = torch.nn.BatchNorm2d(16)\n self.adapter2_vm = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_vm = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_vm = torch.nn.Conv2d(16, 1, 3, padding=1)\n \n # full mask branch\n self.lay4_am = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_am = torch.nn.BatchNorm2d(32)\n self.lay5_am = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_am = torch.nn.BatchNorm2d(16)\n self.adapter2_am = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_am = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_am = torch.nn.Conv2d(16, 1, 3, padding=1)\n \n def get_attn_map(self, feature, guidance):\n b,c,h,w = guidance.shape\n q = torch.flatten(guidance, start_dim=2)\n v = torch.flatten(feature, start_dim=2)\n\n k = v * q\n k = k.sum(dim=-1, keepdim=True) / (q.sum(dim=-1, keepdim=True) + 1e-6)\n attn = (k.transpose(-2, -1) @ v) / 1\n attn = F.softmax(attn, dim=-1)\n attn = attn.reshape(b, c, h, w)\n return attn\n \n def forward(self, features, coarse_mask):\n # features: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n # coarse_mask: [B, 1, 256, 256]\n feat = self.conv_adapter(features[-1])\n coarse_mask = F.interpolate(coarse_mask, scale_factor=(1/16))\n attn_map = self.get_attn_map(feat, coarse_mask)\n x = self.conv_in(feat)\n x = torch.cat((x, attn_map, coarse_mask), dim=1)\n x = F.relu(self.bn1(self.lay1(x)))\n x = F.relu(self.bn2(self.lay2(x)))\n \n cur_feat = self.adapter1(features[-2])\n x = cur_feat + x\n x = F.interpolate(x, size=(32, 32), mode=\"nearest\")\n x = F.relu(self.bn3(self.lay3(x)))\n\n # TODO: visible mask branch\n cur_feat_vm = self.adapter2_vm(features[-3])\n x_vm = cur_feat_vm + x\n x_vm = F.interpolate(x_vm, size=(64, 64), mode=\"nearest\")\n x_vm = F.relu(self.bn4_vm(self.lay4_vm(x_vm)))\n\n cur_feat_vm = self.adapter3_vm(features[-4])\n x_vm = cur_feat_vm + x_vm\n x_vm = F.interpolate(x_vm, size=(128, 128), mode=\"nearest\")\n x_vm = F.relu(self.bn5_vm(self.lay5_vm(x_vm)))\n \n x_vm = self.out_lay_vm(x_vm)\n\n # TODO: full mask branch\n cur_feat_am = self.adapter2_am(features[-3])\n x_am = cur_feat_am + x\n x_am = F.interpolate(x_am, size=(64, 64), mode=\"nearest\")\n x_am = F.relu(self.bn4_am(self.lay4_am(x_am)))\n\n cur_feat_am = self.adapter3_am(features[-4])\n x_am = cur_feat_am + x_am\n x_am = F.interpolate(x_am, size=(128, 128), mode=\"nearest\")\n x_am = F.relu(self.bn5_am(self.lay5_am(x_am)))\n \n x_am = self.out_lay_am(x_am)\n\n return x_vm, x_am"
},
{
"identifier": "VGG19",
"path": "src/loss.py",
"snippet": "class VGG19(torch.nn.Module):\n def __init__(self, pretrained=True, vgg_norm=False):\n super(VGG19, self).__init__()\n self.vgg_norm = vgg_norm\n features = models.vgg19(pretrained=pretrained).features\n self.relu1_1 = torch.nn.Sequential()\n self.relu1_2 = torch.nn.Sequential()\n\n self.relu2_1 = torch.nn.Sequential()\n self.relu2_2 = torch.nn.Sequential()\n\n self.relu3_1 = torch.nn.Sequential()\n self.relu3_2 = torch.nn.Sequential()\n self.relu3_3 = torch.nn.Sequential()\n self.relu3_4 = torch.nn.Sequential()\n\n self.relu4_1 = torch.nn.Sequential()\n self.relu4_2 = torch.nn.Sequential()\n self.relu4_3 = torch.nn.Sequential()\n self.relu4_4 = torch.nn.Sequential()\n\n self.relu5_1 = torch.nn.Sequential()\n self.relu5_2 = torch.nn.Sequential()\n self.relu5_3 = torch.nn.Sequential()\n self.relu5_4 = torch.nn.Sequential()\n\n for x in range(2):\n self.relu1_1.add_module(str(x), features[x])\n\n for x in range(2, 4):\n self.relu1_2.add_module(str(x), features[x])\n\n for x in range(4, 7):\n self.relu2_1.add_module(str(x), features[x])\n\n for x in range(7, 9):\n self.relu2_2.add_module(str(x), features[x])\n\n for x in range(9, 12):\n self.relu3_1.add_module(str(x), features[x])\n\n for x in range(12, 14):\n self.relu3_2.add_module(str(x), features[x])\n\n for x in range(14, 16):\n self.relu3_3.add_module(str(x), features[x])\n\n for x in range(16, 18):\n self.relu3_4.add_module(str(x), features[x])\n\n for x in range(18, 21):\n self.relu4_1.add_module(str(x), features[x])\n\n for x in range(21, 23):\n self.relu4_2.add_module(str(x), features[x])\n\n for x in range(23, 25):\n self.relu4_3.add_module(str(x), features[x])\n\n for x in range(25, 27):\n self.relu4_4.add_module(str(x), features[x])\n\n for x in range(27, 30):\n self.relu5_1.add_module(str(x), features[x])\n\n for x in range(30, 32):\n self.relu5_2.add_module(str(x), features[x])\n\n for x in range(32, 34):\n self.relu5_3.add_module(str(x), features[x])\n\n for x in range(34, 36):\n self.relu5_4.add_module(str(x), features[x])\n\n # don't need the gradients, just want the features\n for param in self.parameters():\n param.requires_grad = False\n\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n\n def forward(self, x):\n if self.vgg_norm:\n x = (x + 1) / 2 # -1~1 --> 0~1\n # 由0~1重新归一化\n mean = torch.as_tensor(self.mean, dtype=x.dtype, device=x.device)\n std = torch.as_tensor(self.std, dtype=x.dtype, device=x.device)\n x.sub_(mean[None,:, None, None]).div_(std[None,:, None, None])\n\n relu1_1 = self.relu1_1(x)\n relu1_2 = self.relu1_2(relu1_1)\n\n relu2_1 = self.relu2_1(relu1_2)\n relu2_2 = self.relu2_2(relu2_1)\n\n relu3_1 = self.relu3_1(relu2_2)\n relu3_2 = self.relu3_2(relu3_1)\n relu3_3 = self.relu3_3(relu3_2)\n relu3_4 = self.relu3_4(relu3_3)\n\n relu4_1 = self.relu4_1(relu3_4)\n relu4_2 = self.relu4_2(relu4_1)\n relu4_3 = self.relu4_3(relu4_2)\n relu4_4 = self.relu4_4(relu4_3)\n\n relu5_1 = self.relu5_1(relu4_4)\n relu5_2 = self.relu5_2(relu5_1)\n relu5_3 = self.relu5_3(relu5_2)\n relu5_4 = self.relu5_4(relu5_3)\n\n out = {\n 'relu1_1': relu1_1,\n 'relu1_2': relu1_2,\n\n 'relu2_1': relu2_1,\n 'relu2_2': relu2_2,\n\n 'relu3_1': relu3_1,\n 'relu3_2': relu3_2,\n 'relu3_3': relu3_3,\n 'relu3_4': relu3_4,\n\n 'relu4_1': relu4_1,\n 'relu4_2': relu4_2,\n 'relu4_3': relu4_3,\n 'relu4_4': relu4_4,\n\n 'relu5_1': relu5_1,\n 'relu5_2': relu5_2,\n 'relu5_3': relu5_3,\n 'relu5_4': relu5_4,\n }\n return out"
},
{
"identifier": "PerceptualLoss",
"path": "src/loss.py",
"snippet": "class PerceptualLoss(nn.Module):\n r\"\"\"\n Perceptual loss, VGG-based\n https://arxiv.org/abs/1603.08155\n https://github.com/dxyang/StyleTransfer/blob/master/utils.py\n \"\"\"\n\n def __init__(self, vgg, weights=[1.0, 1.0, 1.0, 1.0, 1.0], reduction='mean'):\n super(PerceptualLoss, self).__init__()\n # self.add_module('vgg', VGG19())\n self.vgg = vgg\n self.reduction = reduction\n self.criterion = torch.nn.L1Loss(reduction=reduction)\n self.weights = weights\n\n def __call__(self, x, y):\n # Compute features\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n\n if self.reduction == 'mean':\n content_loss = 0.0\n content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])\n content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])\n content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])\n content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])\n content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])\n elif self.reduction == 'none':\n content_loss = []\n content_loss.append(self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1']))\n content_loss.append(self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1']))\n content_loss.append(self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1']))\n content_loss.append(self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1']))\n content_loss.append(self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1']))\n else:\n raise NotImplementedError\n\n return content_loss"
},
{
"identifier": "AdamW",
"path": "utils/pytorch_optimization.py",
"snippet": "class AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n # exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n # exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(grad, alpha = 1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value = 1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n # p.data.addcdiv_(-step_size, exp_avg, denom)\n p.data.addcdiv_(exp_avg, denom, value = -step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha = -group[\"lr\"] * group[\"weight_decay\"])\n\n return loss"
},
{
"identifier": "get_linear_schedule_with_warmup",
"path": "utils/pytorch_optimization.py",
"snippet": "def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases linearly after\n linearly increasing during a warmup period.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n return max(\n 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))\n )\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)"
},
{
"identifier": "torch_show_all_params",
"path": "utils/utils.py",
"snippet": "def torch_show_all_params(model):\n params = list(model.parameters())\n k = 0\n for i in params:\n l = 1\n for j in i.size():\n l *= j\n k = k + l\n return k"
},
{
"identifier": "torch_init_model",
"path": "utils/utils.py",
"snippet": "def torch_init_model(model, init_checkpoint, key):\n state_dict = torch.load(init_checkpoint, map_location='cpu')[key]\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n\n load(model, prefix='')\n \n print(\"missing keys:{}\".format(missing_keys))\n print('unexpected keys:{}'.format(unexpected_keys))\n print('error msgs:{}'.format(error_msgs))"
},
{
"identifier": "Config",
"path": "utils/utils.py",
"snippet": "class Config(object):\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n self._yaml = f.read()\n self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)\n self._dict['path'] = os.path.dirname(config_path)\n\n def __getattr__(self, name):\n if self._dict.get(name) is not None:\n return self._dict[name]\n\n return None\n\n def print(self):\n print('Model configurations:')\n print('---------------------------------')\n print(self._yaml)\n print('')\n print('---------------------------------')\n print('')"
},
{
"identifier": "video_iou",
"path": "utils/evaluation.py",
"snippet": "def video_iou(pred, labels):\n e = 1e-6\n pred = (pred>0.5).float()\n labels = (labels>0.5).float()\n intersection = pred * labels\n union = (pred + labels) - intersection\n iou = intersection.sum(-1).sum(-1) / (union.sum(-1).sum(-1)+ e)\n return iou"
},
{
"identifier": "CrossEntropyLoss",
"path": "utils/loss.py",
"snippet": "class CrossEntropyLoss(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n\n Equation: y = (1 - epsilon) * y + epsilon / K.\n\n Args:\n - num_classes (int): number of classes\n - epsilon (float): weight\n - use_gpu (bool): whether to use gpu devices\n - label_smooth (bool): whether to apply label smoothing, if False, epsilon = 0\n \"\"\"\n def __init__(self, num_classes, epsilon=0.1, device=None, label_smooth=True):\n super(CrossEntropyLoss, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon if label_smooth else 0\n self.device = device\n if device is None:\n self.logsoftmax = nn.LogSoftmax(dim=1)\n else:\n self.logsoftmax = nn.LogSoftmax(dim=1).to(device)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n - inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n - targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)\n if self.device is not None:\n targets = targets.to(self.device)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss"
}
] | import os
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torchvision import transforms
from taming_src.taming_models import VQModel
from src.video_component import MaskedTransformer, Resnet_Encoder, Refine_Module
from src.loss import VGG19, PerceptualLoss
from utils.pytorch_optimization import AdamW, get_linear_schedule_with_warmup
from utils.utils import torch_show_all_params, torch_init_model
from utils.utils import Config
from utils.evaluation import video_iou
from utils.loss import CrossEntropyLoss
from tqdm import tqdm | 8,251 |
class C2F_Seg(nn.Module):
def __init__(self, config, g_path, mode, logger=None, save_eval_dict={}):
super(C2F_Seg, self).__init__()
self.config = config
self.iteration = 0
self.sample_iter = 0
self.name = config.model_type
# load g model for mask
self.g_config = Config(os.path.join(g_path, 'vqgan_{}.yml'.format(config.dataset)))
self.g_path = os.path.join(g_path, self.g_config.model_type)
self.root_path = config.path
self.transformer_path = os.path.join(config.path, self.name)
# self.refine_path = os.path.join(config.path, "Refine")
self.trans_size = config.trans_size
self.mode = mode
self.save_eval_dict = save_eval_dict
self.eps = 1e-6
self.train_sample_iters = config.train_sample_iters
self.g_model = VQModel(self.g_config).to(config.device)
self.img_encoder = Resnet_Encoder().to(config.device)
self.refine_module = Refine_Module().to(config.device)
self.transformer = MaskedTransformer(config).to(config.device)
self.g_model.eval()
self.refine_criterion = nn.BCELoss()
self.criterion = CrossEntropyLoss(num_classes=config.vocab_size+1, device=config.device)
if config.train_with_dec:
if not config.gumbel_softmax:
self.temperature = nn.Parameter(torch.tensor([config.tp], dtype=torch.float32),
requires_grad=True).to(config.device)
if config.use_vgg:
vgg = VGG19(pretrained=True, vgg_norm=config.vgg_norm).to(config.device)
vgg.eval()
reduction = 'mean' if config.balanced_loss is False else 'none'
self.perceptual_loss = PerceptualLoss(vgg, weights=config.vgg_weights,
reduction=reduction).to(config.device)
else:
self.perceptual_loss = None
if config.init_gpt_with_vqvae:
self.transformer.z_emb.weight = self.g_model.quantize.embedding.weight
if logger is not None:
logger.info('Gen Parameters:{}'.format(torch_show_all_params(self.g_model)))
logger.info('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer)))
else:
print('Gen Parameters:{}'.format(torch_show_all_params(self.g_model)))
print('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer)))
# loss
no_decay = ['bias', 'ln1.bias', 'ln1.weight', 'ln2.bias', 'ln2.weight']
ignored_param = ['z_emb.weight', 'c_emb.weight']
param_optimizer = self.transformer.named_parameters()
param_optimizer_encoder = self.img_encoder.named_parameters()
param_optimizer_refine= self.refine_module.named_parameters()
optimizer_parameters = [
{'params': [p for n, p in param_optimizer if not any([nd in n for nd in no_decay])],
'weight_decay': config.weight_decay},
{'params': [p for n, p in param_optimizer if any([nd in n for nd in no_decay])],
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer_encoder], 'weight_decay': config.weight_decay},
{'params': [p for n, p in param_optimizer_refine], 'weight_decay': config.weight_decay},
]
self.opt = AdamW(params=optimizer_parameters,
lr=float(config.lr), betas=(config.beta1, config.beta2))
|
class C2F_Seg(nn.Module):
def __init__(self, config, g_path, mode, logger=None, save_eval_dict={}):
super(C2F_Seg, self).__init__()
self.config = config
self.iteration = 0
self.sample_iter = 0
self.name = config.model_type
# load g model for mask
self.g_config = Config(os.path.join(g_path, 'vqgan_{}.yml'.format(config.dataset)))
self.g_path = os.path.join(g_path, self.g_config.model_type)
self.root_path = config.path
self.transformer_path = os.path.join(config.path, self.name)
# self.refine_path = os.path.join(config.path, "Refine")
self.trans_size = config.trans_size
self.mode = mode
self.save_eval_dict = save_eval_dict
self.eps = 1e-6
self.train_sample_iters = config.train_sample_iters
self.g_model = VQModel(self.g_config).to(config.device)
self.img_encoder = Resnet_Encoder().to(config.device)
self.refine_module = Refine_Module().to(config.device)
self.transformer = MaskedTransformer(config).to(config.device)
self.g_model.eval()
self.refine_criterion = nn.BCELoss()
self.criterion = CrossEntropyLoss(num_classes=config.vocab_size+1, device=config.device)
if config.train_with_dec:
if not config.gumbel_softmax:
self.temperature = nn.Parameter(torch.tensor([config.tp], dtype=torch.float32),
requires_grad=True).to(config.device)
if config.use_vgg:
vgg = VGG19(pretrained=True, vgg_norm=config.vgg_norm).to(config.device)
vgg.eval()
reduction = 'mean' if config.balanced_loss is False else 'none'
self.perceptual_loss = PerceptualLoss(vgg, weights=config.vgg_weights,
reduction=reduction).to(config.device)
else:
self.perceptual_loss = None
if config.init_gpt_with_vqvae:
self.transformer.z_emb.weight = self.g_model.quantize.embedding.weight
if logger is not None:
logger.info('Gen Parameters:{}'.format(torch_show_all_params(self.g_model)))
logger.info('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer)))
else:
print('Gen Parameters:{}'.format(torch_show_all_params(self.g_model)))
print('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer)))
# loss
no_decay = ['bias', 'ln1.bias', 'ln1.weight', 'ln2.bias', 'ln2.weight']
ignored_param = ['z_emb.weight', 'c_emb.weight']
param_optimizer = self.transformer.named_parameters()
param_optimizer_encoder = self.img_encoder.named_parameters()
param_optimizer_refine= self.refine_module.named_parameters()
optimizer_parameters = [
{'params': [p for n, p in param_optimizer if not any([nd in n for nd in no_decay])],
'weight_decay': config.weight_decay},
{'params': [p for n, p in param_optimizer if any([nd in n for nd in no_decay])],
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer_encoder], 'weight_decay': config.weight_decay},
{'params': [p for n, p in param_optimizer_refine], 'weight_decay': config.weight_decay},
]
self.opt = AdamW(params=optimizer_parameters,
lr=float(config.lr), betas=(config.beta1, config.beta2)) | self.sche = get_linear_schedule_with_warmup(self.opt, num_warmup_steps=config.warmup_iters, | 7 | 2023-12-21 04:25:47+00:00 | 12k |
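The optimizer setup above pairs AdamW with `get_linear_schedule_with_warmup` (see the context snippet), whose learning-rate multiplier ramps up linearly during warmup and then decays linearly to zero over the remaining steps. Below is a minimal standalone sketch of that multiplier; the function name `warmup_linear_decay` is illustrative, not part of the repository.

# Sketch: the base learning rate is scaled by this factor at every step.
def warmup_linear_decay(step: int, warmup_steps: int, total_steps: int) -> float:
    if step < warmup_steps:
        return step / max(1, warmup_steps)  # ramp 0 -> 1 during warmup
    remaining = total_steps - step
    return max(0.0, remaining / max(1, total_steps - warmup_steps))  # decay 1 -> 0

if __name__ == "__main__":
    # e.g. 10 warmup steps out of 100 total
    for s in (0, 5, 10, 55, 100):
        print(s, round(warmup_linear_decay(s, 10, 100), 3))
    # 0 0.0 | 5 0.5 | 10 1.0 | 55 0.5 | 100 0.0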
huahuahuage/Bert-VITS2-Speech | onnx_infer/text/chinese.py | [
{
"identifier": "punctuation",
"path": "onnx_infer/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "onnx_infer/text/chinese_tone_sandhi.py",
"snippet": "class ToneSandhi:\r\n def __init__(self):\r\n self.must_neural_tone_words = {\r\n \"麻烦\",\r\n \"麻利\",\r\n \"鸳鸯\",\r\n \"高粱\",\r\n \"骨头\",\r\n \"骆驼\",\r\n \"马虎\",\r\n \"首饰\",\r\n \"馒头\",\r\n \"馄饨\",\r\n \"风筝\",\r\n \"难为\",\r\n \"队伍\",\r\n \"阔气\",\r\n \"闺女\",\r\n \"门道\",\r\n \"锄头\",\r\n \"铺盖\",\r\n \"铃铛\",\r\n \"铁匠\",\r\n \"钥匙\",\r\n \"里脊\",\r\n \"里头\",\r\n \"部分\",\r\n \"那么\",\r\n \"道士\",\r\n \"造化\",\r\n \"迷糊\",\r\n \"连累\",\r\n \"这么\",\r\n \"这个\",\r\n \"运气\",\r\n \"过去\",\r\n \"软和\",\r\n \"转悠\",\r\n \"踏实\",\r\n \"跳蚤\",\r\n \"跟头\",\r\n \"趔趄\",\r\n \"财主\",\r\n \"豆腐\",\r\n \"讲究\",\r\n \"记性\",\r\n \"记号\",\r\n \"认识\",\r\n \"规矩\",\r\n \"见识\",\r\n \"裁缝\",\r\n \"补丁\",\r\n \"衣裳\",\r\n \"衣服\",\r\n \"衙门\",\r\n \"街坊\",\r\n \"行李\",\r\n \"行当\",\r\n \"蛤蟆\",\r\n \"蘑菇\",\r\n \"薄荷\",\r\n \"葫芦\",\r\n \"葡萄\",\r\n \"萝卜\",\r\n \"荸荠\",\r\n \"苗条\",\r\n \"苗头\",\r\n \"苍蝇\",\r\n \"芝麻\",\r\n \"舒服\",\r\n \"舒坦\",\r\n \"舌头\",\r\n \"自在\",\r\n \"膏药\",\r\n \"脾气\",\r\n \"脑袋\",\r\n \"脊梁\",\r\n \"能耐\",\r\n \"胳膊\",\r\n \"胭脂\",\r\n \"胡萝\",\r\n \"胡琴\",\r\n \"胡同\",\r\n \"聪明\",\r\n \"耽误\",\r\n \"耽搁\",\r\n \"耷拉\",\r\n \"耳朵\",\r\n \"老爷\",\r\n \"老实\",\r\n \"老婆\",\r\n \"老头\",\r\n \"老太\",\r\n \"翻腾\",\r\n \"罗嗦\",\r\n \"罐头\",\r\n \"编辑\",\r\n \"结实\",\r\n \"红火\",\r\n \"累赘\",\r\n \"糨糊\",\r\n \"糊涂\",\r\n \"精神\",\r\n \"粮食\",\r\n \"簸箕\",\r\n \"篱笆\",\r\n \"算计\",\r\n \"算盘\",\r\n \"答应\",\r\n \"笤帚\",\r\n \"笑语\",\r\n \"笑话\",\r\n \"窟窿\",\r\n \"窝囊\",\r\n \"窗户\",\r\n \"稳当\",\r\n \"稀罕\",\r\n \"称呼\",\r\n \"秧歌\",\r\n \"秀气\",\r\n \"秀才\",\r\n \"福气\",\r\n \"祖宗\",\r\n \"砚台\",\r\n \"码头\",\r\n \"石榴\",\r\n \"石头\",\r\n \"石匠\",\r\n \"知识\",\r\n \"眼睛\",\r\n \"眯缝\",\r\n \"眨巴\",\r\n \"眉毛\",\r\n \"相声\",\r\n \"盘算\",\r\n \"白净\",\r\n \"痢疾\",\r\n \"痛快\",\r\n \"疟疾\",\r\n \"疙瘩\",\r\n \"疏忽\",\r\n \"畜生\",\r\n \"生意\",\r\n \"甘蔗\",\r\n \"琵琶\",\r\n \"琢磨\",\r\n \"琉璃\",\r\n \"玻璃\",\r\n \"玫瑰\",\r\n \"玄乎\",\r\n \"狐狸\",\r\n \"状元\",\r\n \"特务\",\r\n \"牲口\",\r\n \"牙碜\",\r\n \"牌楼\",\r\n \"爽快\",\r\n \"爱人\",\r\n \"热闹\",\r\n \"烧饼\",\r\n \"烟筒\",\r\n \"烂糊\",\r\n \"点心\",\r\n \"炊帚\",\r\n \"灯笼\",\r\n \"火候\",\r\n \"漂亮\",\r\n \"滑溜\",\r\n \"溜达\",\r\n \"温和\",\r\n \"清楚\",\r\n \"消息\",\r\n \"浪头\",\r\n \"活泼\",\r\n \"比方\",\r\n \"正经\",\r\n \"欺负\",\r\n \"模糊\",\r\n \"槟榔\",\r\n \"棺材\",\r\n \"棒槌\",\r\n \"棉花\",\r\n \"核桃\",\r\n \"栅栏\",\r\n \"柴火\",\r\n \"架势\",\r\n \"枕头\",\r\n \"枇杷\",\r\n \"机灵\",\r\n \"本事\",\r\n \"木头\",\r\n \"木匠\",\r\n \"朋友\",\r\n \"月饼\",\r\n \"月亮\",\r\n \"暖和\",\r\n \"明白\",\r\n \"时候\",\r\n \"新鲜\",\r\n \"故事\",\r\n \"收拾\",\r\n \"收成\",\r\n \"提防\",\r\n \"挖苦\",\r\n \"挑剔\",\r\n \"指甲\",\r\n \"指头\",\r\n \"拾掇\",\r\n \"拳头\",\r\n \"拨弄\",\r\n \"招牌\",\r\n \"招呼\",\r\n \"抬举\",\r\n \"护士\",\r\n \"折腾\",\r\n \"扫帚\",\r\n \"打量\",\r\n \"打算\",\r\n \"打点\",\r\n \"打扮\",\r\n \"打听\",\r\n \"打发\",\r\n \"扎实\",\r\n \"扁担\",\r\n \"戒指\",\r\n \"懒得\",\r\n \"意识\",\r\n \"意思\",\r\n \"情形\",\r\n \"悟性\",\r\n \"怪物\",\r\n \"思量\",\r\n \"怎么\",\r\n \"念头\",\r\n \"念叨\",\r\n \"快活\",\r\n \"忙活\",\r\n \"志气\",\r\n \"心思\",\r\n \"得罪\",\r\n \"张罗\",\r\n \"弟兄\",\r\n \"开通\",\r\n \"应酬\",\r\n \"庄稼\",\r\n \"干事\",\r\n \"帮手\",\r\n \"帐篷\",\r\n \"希罕\",\r\n \"师父\",\r\n \"师傅\",\r\n \"巴结\",\r\n \"巴掌\",\r\n \"差事\",\r\n \"工夫\",\r\n \"岁数\",\r\n \"屁股\",\r\n \"尾巴\",\r\n \"少爷\",\r\n \"小气\",\r\n \"小伙\",\r\n \"将就\",\r\n \"对头\",\r\n \"对付\",\r\n \"寡妇\",\r\n \"家伙\",\r\n \"客气\",\r\n \"实在\",\r\n \"官司\",\r\n \"学问\",\r\n \"学生\",\r\n \"字号\",\r\n \"嫁妆\",\r\n \"媳妇\",\r\n \"媒人\",\r\n \"婆家\",\r\n \"娘家\",\r\n \"委屈\",\r\n \"姑娘\",\r\n \"姐夫\",\r\n \"妯娌\",\r\n \"妥当\",\r\n \"妖精\",\r\n \"奴才\",\r\n \"女婿\",\r\n \"头发\",\r\n \"太阳\",\r\n \"大爷\",\r\n \"大方\",\r\n \"大意\",\r\n \"大夫\",\r\n 
\"多少\",\r\n \"多么\",\r\n \"外甥\",\r\n \"壮实\",\r\n \"地道\",\r\n \"地方\",\r\n \"在乎\",\r\n \"困难\",\r\n \"嘴巴\",\r\n \"嘱咐\",\r\n \"嘟囔\",\r\n \"嘀咕\",\r\n \"喜欢\",\r\n \"喇嘛\",\r\n \"喇叭\",\r\n \"商量\",\r\n \"唾沫\",\r\n \"哑巴\",\r\n \"哈欠\",\r\n \"哆嗦\",\r\n \"咳嗽\",\r\n \"和尚\",\r\n \"告诉\",\r\n \"告示\",\r\n \"含糊\",\r\n \"吓唬\",\r\n \"后头\",\r\n \"名字\",\r\n \"名堂\",\r\n \"合同\",\r\n \"吆喝\",\r\n \"叫唤\",\r\n \"口袋\",\r\n \"厚道\",\r\n \"厉害\",\r\n \"千斤\",\r\n \"包袱\",\r\n \"包涵\",\r\n \"匀称\",\r\n \"勤快\",\r\n \"动静\",\r\n \"动弹\",\r\n \"功夫\",\r\n \"力气\",\r\n \"前头\",\r\n \"刺猬\",\r\n \"刺激\",\r\n \"别扭\",\r\n \"利落\",\r\n \"利索\",\r\n \"利害\",\r\n \"分析\",\r\n \"出息\",\r\n \"凑合\",\r\n \"凉快\",\r\n \"冷战\",\r\n \"冤枉\",\r\n \"冒失\",\r\n \"养活\",\r\n \"关系\",\r\n \"先生\",\r\n \"兄弟\",\r\n \"便宜\",\r\n \"使唤\",\r\n \"佩服\",\r\n \"作坊\",\r\n \"体面\",\r\n \"位置\",\r\n \"似的\",\r\n \"伙计\",\r\n \"休息\",\r\n \"什么\",\r\n \"人家\",\r\n \"亲戚\",\r\n \"亲家\",\r\n \"交情\",\r\n \"云彩\",\r\n \"事情\",\r\n \"买卖\",\r\n \"主意\",\r\n \"丫头\",\r\n \"丧气\",\r\n \"两口\",\r\n \"东西\",\r\n \"东家\",\r\n \"世故\",\r\n \"不由\",\r\n \"不在\",\r\n \"下水\",\r\n \"下巴\",\r\n \"上头\",\r\n \"上司\",\r\n \"丈夫\",\r\n \"丈人\",\r\n \"一辈\",\r\n \"那个\",\r\n \"菩萨\",\r\n \"父亲\",\r\n \"母亲\",\r\n \"咕噜\",\r\n \"邋遢\",\r\n \"费用\",\r\n \"冤家\",\r\n \"甜头\",\r\n \"介绍\",\r\n \"荒唐\",\r\n \"大人\",\r\n \"泥鳅\",\r\n \"幸福\",\r\n \"熟悉\",\r\n \"计划\",\r\n \"扑腾\",\r\n \"蜡烛\",\r\n \"姥爷\",\r\n \"照顾\",\r\n \"喉咙\",\r\n \"吉他\",\r\n \"弄堂\",\r\n \"蚂蚱\",\r\n \"凤凰\",\r\n \"拖沓\",\r\n \"寒碜\",\r\n \"糟蹋\",\r\n \"倒腾\",\r\n \"报复\",\r\n \"逻辑\",\r\n \"盘缠\",\r\n \"喽啰\",\r\n \"牢骚\",\r\n \"咖喱\",\r\n \"扫把\",\r\n \"惦记\",\r\n }\r\n self.must_not_neural_tone_words = {\r\n \"男子\",\r\n \"女子\",\r\n \"分子\",\r\n \"原子\",\r\n \"量子\",\r\n \"莲子\",\r\n \"石子\",\r\n \"瓜子\",\r\n \"电子\",\r\n \"人人\",\r\n \"虎虎\",\r\n }\r\n self.punc = \":,;。?!“”‘’':,;.?!\"\r\n\r\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\r\n # e.g.\r\n # word: \"家里\"\r\n # pos: \"s\"\r\n # finals: ['ia1', 'i3']\r\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\r\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\r\n for j, item in enumerate(word):\r\n if (\r\n j - 1 >= 0\r\n and item == word[j - 1]\r\n and pos[0] in {\"n\", \"v\", \"a\"}\r\n and word not in self.must_not_neural_tone_words\r\n ):\r\n finals[j] = finals[j][:-1] + \"5\"\r\n ge_idx = word.find(\"个\")\r\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n elif len(word) >= 1 and word[-1] in \"的地得\":\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # e.g. 走了, 看着, 去过\r\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\r\n # finals[-1] = finals[-1][:-1] + \"5\"\r\n elif (\r\n len(word) > 1\r\n and word[-1] in \"们子\"\r\n and pos in {\"r\", \"n\"}\r\n and word not in self.must_not_neural_tone_words\r\n ):\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # e.g. 桌上, 地下, 家里\r\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # e.g. 
上来, 下去\r\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # 个做量词\r\n elif (\r\n ge_idx >= 1\r\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\r\n ) or word == \"个\":\r\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\r\n else:\r\n if (\r\n word in self.must_neural_tone_words\r\n or word[-2:] in self.must_neural_tone_words\r\n ):\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n\r\n word_list = self._split_word(word)\r\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\r\n for i, word in enumerate(word_list):\r\n # conventional neural in Chinese\r\n if (\r\n word in self.must_neural_tone_words\r\n or word[-2:] in self.must_neural_tone_words\r\n ):\r\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\r\n finals = sum(finals_list, [])\r\n return finals\r\n\r\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\r\n # e.g. 看不懂\r\n if len(word) == 3 and word[1] == \"不\":\r\n finals[1] = finals[1][:-1] + \"5\"\r\n else:\r\n for i, char in enumerate(word):\r\n # \"不\" before tone4 should be bu2, e.g. 不怕\r\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\r\n finals[i] = finals[i][:-1] + \"2\"\r\n return finals\r\n\r\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\r\n # \"一\" in number sequences, e.g. 一零零, 二一零\r\n if word.find(\"一\") != -1 and all(\r\n [item.isnumeric() for item in word if item != \"一\"]\r\n ):\r\n return finals\r\n # \"一\" between reduplication words should be yi5, e.g. 看一看\r\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\r\n finals[1] = finals[1][:-1] + \"5\"\r\n # when \"一\" is ordinal word, it should be yi1\r\n elif word.startswith(\"第一\"):\r\n finals[1] = finals[1][:-1] + \"1\"\r\n else:\r\n for i, char in enumerate(word):\r\n if char == \"一\" and i + 1 < len(word):\r\n # \"一\" before tone4 should be yi2, e.g. 一段\r\n if finals[i + 1][-1] == \"4\":\r\n finals[i] = finals[i][:-1] + \"2\"\r\n # \"一\" before non-tone4 should be yi4, e.g. 一天\r\n else:\r\n # \"一\" 后面如果是标点,还读一声\r\n if word[i + 1] not in self.punc:\r\n finals[i] = finals[i][:-1] + \"4\"\r\n return finals\r\n\r\n def _split_word(self, word: str) -> List[str]:\r\n word_list = jieba.cut_for_search(word)\r\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\r\n first_subword = word_list[0]\r\n first_begin_idx = word.find(first_subword)\r\n if first_begin_idx == 0:\r\n second_subword = word[len(first_subword) :]\r\n new_word_list = [first_subword, second_subword]\r\n else:\r\n second_subword = word[: -len(first_subword)]\r\n new_word_list = [second_subword, first_subword]\r\n return new_word_list\r\n\r\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\r\n if len(word) == 2 and self._all_tone_three(finals):\r\n finals[0] = finals[0][:-1] + \"2\"\r\n elif len(word) == 3:\r\n word_list = self._split_word(word)\r\n if self._all_tone_three(finals):\r\n # disyllabic + monosyllabic, e.g. 蒙古/包\r\n if len(word_list[0]) == 2:\r\n finals[0] = finals[0][:-1] + \"2\"\r\n finals[1] = finals[1][:-1] + \"2\"\r\n # monosyllabic + disyllabic, e.g. 纸/老虎\r\n elif len(word_list[0]) == 1:\r\n finals[1] = finals[1][:-1] + \"2\"\r\n else:\r\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\r\n if len(finals_list) == 2:\r\n for i, sub in enumerate(finals_list):\r\n # e.g. 所有/人\r\n if self._all_tone_three(sub) and len(sub) == 2:\r\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\r\n # e.g. 
好/喜欢\r\n elif (\r\n i == 1\r\n and not self._all_tone_three(sub)\r\n and finals_list[i][0][-1] == \"3\"\r\n and finals_list[0][-1][-1] == \"3\"\r\n ):\r\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\r\n finals = sum(finals_list, [])\r\n # split idiom into two words who's length is 2\r\n elif len(word) == 4:\r\n finals_list = [finals[:2], finals[2:]]\r\n finals = []\r\n for sub in finals_list:\r\n if self._all_tone_three(sub):\r\n sub[0] = sub[0][:-1] + \"2\"\r\n finals += sub\r\n\r\n return finals\r\n\r\n def _all_tone_three(self, finals: List[str]) -> bool:\r\n return all(x[-1] == \"3\" for x in finals)\r\n\r\n # merge \"不\" and the word behind it\r\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\r\n def __merge_bu(self, seg_list: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并'不'字,在jieba中'不'字单独出现可能会引起错误\r\n \"\"\"\r\n last_words = \"\"\r\n new_seg_list = []\r\n # 在分词列表中查找单独出现的'不'字\r\n for words, speech_part in seg_list:\r\n if last_words == \"不\" and words != \"不\":\r\n words = last_words + words\r\n if words != \"不\":\r\n new_seg_list.append((words, speech_part))\r\n last_words = words\r\n if last_words == \"不\":\r\n new_seg_list.append((last_words, \"d\"))\r\n return new_seg_list\r\n\r\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. \"听\",\"一\",\"听\" ->\"听一听\"\r\n # function 2: merge single \"一\" and the word behind it\r\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\r\n # e.g.\r\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\r\n # output seg: [['听一听', 'v']]\r\n def __merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并'一'字,在jieba中'不'字单独出现可能会引起错误\r\n \"\"\"\r\n new_seg = []\r\n # function 1\r\n for i, (word, pos) in enumerate(seg):\r\n if (\r\n i - 1 >= 0\r\n and word == \"一\"\r\n and i + 1 < len(seg)\r\n and seg[i - 1][0] == seg[i + 1][0]\r\n and seg[i - 1][1] == \"v\"\r\n ):\r\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\r\n else:\r\n if (\r\n i - 2 >= 0\r\n and seg[i - 1][0] == \"一\"\r\n and seg[i - 2][0] == word\r\n and pos == \"v\"\r\n ):\r\n continue\r\n else:\r\n new_seg.append([word, pos])\r\n seg = new_seg\r\n new_seg = []\r\n # function 2\r\n for i, (word, pos) in enumerate(seg):\r\n if new_seg and new_seg[-1][0] == \"一\":\r\n new_seg[-1][0] = new_seg[-1][0] + word\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n # the first and the second words are all_tone_three\r\n def __merge_continuous_three_tones(\r\n self, seg: List[Tuple[str, str]]\r\n ) -> List[Tuple[str, str]]:\r\n new_seg = []\r\n sub_finals_list = [\r\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\r\n for (word, pos) in seg\r\n ]\r\n assert len(sub_finals_list) == len(seg)\r\n merge_last = [False] * len(seg)\r\n for i, (word, pos) in enumerate(seg):\r\n if (\r\n i - 1 >= 0\r\n and self._all_tone_three(sub_finals_list[i - 1])\r\n and self._all_tone_three(sub_finals_list[i])\r\n and not merge_last[i - 1]\r\n ):\r\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\r\n if (\r\n not self._is_reduplication(seg[i - 1][0])\r\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\r\n ):\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n merge_last[i] = True\r\n else:\r\n new_seg.append([word, pos])\r\n else:\r\n new_seg.append([word, pos])\r\n\r\n return new_seg\r\n\r\n def _is_reduplication(self, word: str) -> 
bool:\r\n return len(word) == 2 and word[0] == word[1]\r\n\r\n # the last char of first word and the first char of second word is tone_three\r\n def __merge_continuous_three_tones_2(\r\n self, seg: List[Tuple[str, str]]\r\n ) -> List[Tuple[str, str]]:\r\n new_seg = []\r\n sub_finals_list = [\r\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\r\n for (word, pos) in seg\r\n ]\r\n assert len(sub_finals_list) == len(seg)\r\n merge_last = [False] * len(seg)\r\n for i, (word, pos) in enumerate(seg):\r\n if (\r\n i - 1 >= 0\r\n and sub_finals_list[i - 1][-1][-1] == \"3\"\r\n and sub_finals_list[i][0][-1] == \"3\"\r\n and not merge_last[i - 1]\r\n ):\r\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\r\n if (\r\n not self._is_reduplication(seg[i - 1][0])\r\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\r\n ):\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n merge_last[i] = True\r\n else:\r\n new_seg.append([word, pos])\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n def __merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并'儿'话\r\n \"\"\"\r\n new_seg = []\r\n for i, (word, pos) in enumerate(seg):\r\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n def __merge_reduplication(\r\n self, seg: List[Tuple[str, str]]\r\n ) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并单独的叠词\r\n \"\"\"\r\n new_seg = []\r\n for i, (word, pos) in enumerate(seg):\r\n if new_seg and word == new_seg[-1][0]:\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 自定义jieba分词合并处理\r\n\r\n 输入: jieba分词结果列表\r\n \"\"\"\r\n seg = self.__merge_bu(seg)\r\n try:\r\n seg = self.__merge_yi(seg)\r\n except:\r\n log_instance.warning(\"jieba中文分词:合并'一'字失败\")\r\n # print(\"jieba中文分词:合并'一'字失败\")\r\n log_instance.debug(\"jieba中文分词:合并相同的字词\")\r\n seg = self.__merge_reduplication(seg)\r\n log_instance.debug(seg)\r\n seg = self.__merge_continuous_three_tones(seg)\r\n seg = self.__merge_continuous_three_tones_2(seg)\r\n seg = self.__merge_er(seg)\r\n return seg\r\n\r\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\r\n finals = self._bu_sandhi(word, finals)\r\n finals = self._yi_sandhi(word, finals)\r\n finals = self._neural_sandhi(word, pos, finals)\r\n finals = self._three_sandhi(word, finals)\r\n return finals\r"
},
{
"identifier": "log_instance",
"path": "log.py",
"snippet": "DISABLED_LOGGER = [\"gradio.processing_utils\", \"gradio\", \"httpx\"]\r"
}
] | import os
import re
import cn2an
import jieba.posseg as psg
from typing import List, Dict
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .chinese_tone_sandhi import ToneSandhi
from log import log_instance
| 8,307 |
REP_MAP = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
'"': "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
class ChineseG2P:
def __init__(self) -> None:
self.tone_modifier = ToneSandhi()
self.pinyin_to_symbol_map: Dict[str, str] = {}
self.__read_opencpop_symbol_map()
def __read_opencpop_symbol_map(self):
"""
Read the opencpop data
"""
f = open("onnx/Text/opencpop-strict.txt", "r")
for line in f.readlines():
self.pinyin_to_symbol_map[line.split("\t")[0]] = line.strip().split("\t")[1]
f.close()
@staticmethod
def __get_initials_finals(word):
initials = []
finals = []
orig_initials = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.INITIALS
)
orig_finals = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
)
for c, v in zip(orig_initials, orig_finals):
initials.append(c)
finals.append(v)
return initials, finals
def g2p(self, segments_list: List[str]):
phones_list = []
tones_list = []
word2ph = []
for seg in segments_list:
seg_cut = psg.lcut(seg)
initials = []
finals = []
seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)
for word, pos in seg_cut:
if pos == "eng":
continue
sub_initials, sub_finals = self.__get_initials_finals(word)
sub_finals = self.tone_modifier.modified_tone(word, pos, sub_finals)
initials.append(sub_initials)
finals.append(sub_finals)
# assert len(sub_initials) == len(sub_finals) == len(word)
initials = sum(initials, [])
finals = sum(finals, [])
#
for c, v in zip(initials, finals):
raw_pinyin = c + v
# NOTE: post process for pypinyin outputs
# we discriminate i, ii and iii
if c == v:
|
REP_MAP = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
'"': "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
class ChineseG2P:
def __init__(self) -> None:
self.tone_modifier = ToneSandhi()
self.pinyin_to_symbol_map: Dict[str, str] = {}
self.__read_opencpop_symbol_map()
def __read_opencpop_symbol_map(self):
"""
Read the opencpop data
"""
f = open("onnx/Text/opencpop-strict.txt", "r")
for line in f.readlines():
self.pinyin_to_symbol_map[line.split("\t")[0]] = line.strip().split("\t")[1]
f.close()
@staticmethod
def __get_initials_finals(word):
initials = []
finals = []
orig_initials = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.INITIALS
)
orig_finals = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
)
for c, v in zip(orig_initials, orig_finals):
initials.append(c)
finals.append(v)
return initials, finals
def g2p(self, segments_list: List[str]):
phones_list = []
tones_list = []
word2ph = []
for seg in segments_list:
seg_cut = psg.lcut(seg)
initials = []
finals = []
seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)
for word, pos in seg_cut:
if pos == "eng":
continue
sub_initials, sub_finals = self.__get_initials_finals(word)
sub_finals = self.tone_modifier.modified_tone(word, pos, sub_finals)
initials.append(sub_initials)
finals.append(sub_finals)
# assert len(sub_initials) == len(sub_finals) == len(word)
initials = sum(initials, [])
finals = sum(finals, [])
#
for c, v in zip(initials, finals):
raw_pinyin = c + v
# NOTE: post process for pypinyin outputs
# we discriminate i, ii and iii
if c == v:
| assert c in punctuation
| 0 | 2023-12-21 13:50:50+00:00 | 12k |
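The `__get_initials_finals` helper above relies on pypinyin to split each word into initials and finals before tone sandhi is applied. Below is a minimal standalone sketch of that step; the expected output assumes a standard pypinyin install.

# Sketch: split a word into pinyin initials and tone-numbered finals,
# mirroring the lazy_pinyin calls in the snippet above.
from pypinyin import lazy_pinyin, Style

word = "你好"
initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
print(list(zip(initials, finals)))
# Expected: [('n', 'i3'), ('h', 'ao3')]
# The ToneSandhi._three_sandhi step would then change the first third tone
# to a second tone ("i2"), since two third tones appear in a row.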
lipku/metahuman-stream | nerf_triplane/network.py | [
{
"identifier": "get_encoder",
"path": "encoding.py",
"snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False,\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency':\n from freqencoder import FreqEncoder\n encoder = FreqEncoder(input_dim=input_dim, degree=multires)\n\n elif encoding == 'spherical_harmonics':\n from shencoder import SHEncoder\n encoder = SHEncoder(input_dim=input_dim, degree=degree)\n\n elif encoding == 'hashgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners)\n \n elif encoding == 'tiledgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners)\n \n elif encoding == 'ash':\n from ashencoder import AshEncoder\n encoder = AshEncoder(input_dim=input_dim, output_dim=16, log2_hashmap_size=log2_hashmap_size, resolution=desired_resolution)\n\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, spherical_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim"
},
{
"identifier": "NeRFRenderer",
"path": "nerf_triplane/renderer.py",
"snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.density_scale = 1\n\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n self.density_thresh_torso = opt.density_thresh_torso\n\n self.exp_eye = opt.exp_eye\n self.test_train = opt.test_train\n self.smooth_lips = opt.smooth_lips\n\n self.torso = opt.torso\n self.cuda_ray = opt.cuda_ray\n\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor([-opt.bound, -opt.bound/2, -opt.bound, opt.bound, opt.bound/2, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n # individual codes\n self.individual_num = opt.ind_num\n\n self.individual_dim = opt.ind_dim\n if self.individual_dim > 0:\n self.individual_codes = nn.Parameter(torch.randn(self.individual_num, self.individual_dim) * 0.1) \n \n if self.torso:\n self.individual_dim_torso = opt.ind_dim_torso\n if self.individual_dim_torso > 0:\n self.individual_codes_torso = nn.Parameter(torch.randn(self.individual_num, self.individual_dim_torso) * 0.1) \n\n # optimize camera pose\n self.train_camera = self.opt.train_camera\n if self.train_camera:\n self.camera_dR = nn.Parameter(torch.zeros(self.individual_num, 3)) # euler angle\n self.camera_dT = nn.Parameter(torch.zeros(self.individual_num, 3)) # xyz offset\n\n # extra state for cuda raymarching\n \n # 3D head density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n\n # 2D torso density grid\n if self.torso:\n density_grid_torso = torch.zeros([self.grid_size ** 2]) # [H * H]\n self.register_buffer('density_grid_torso', density_grid_torso)\n self.mean_density_torso = 0\n\n # step counter\n step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging...\n self.register_buffer('step_counter', step_counter)\n self.mean_count = 0\n self.local_step = 0\n \n # decay for enc_a\n if self.smooth_lips:\n self.enc_a = None\n \n def forward(self, x, d):\n raise NotImplementedError()\n\n # separated density and color query (can accelerate non-cuda-ray mode.)\n def density(self, x):\n raise NotImplementedError()\n\n def color(self, x, d, mask=None, **kwargs):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not self.cuda_ray:\n return \n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n # step counter\n self.step_counter.zero_()\n self.mean_count = 0\n self.local_step = 0\n\n\n def run_cuda(self, rays_o, rays_d, auds, bg_coords, poses, eye=None, index=0, dt_gamma=0, bg_color=None, perturb=False, force_all_rays=False, max_steps=1024, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # auds: [B, 16]\n # index: [B]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n 
bg_coords = bg_coords.contiguous().view(-1, 2)\n\n # only add camera offset at training!\n if self.train_camera and (self.training or self.test_train):\n dT = self.camera_dT[index] # [1, 3]\n dR = euler_angles_to_matrix(self.camera_dR[index] / 180 * np.pi + 1e-8).squeeze(0) # [1, 3] --> [3, 3]\n \n rays_o = rays_o + dT\n rays_d = rays_d @ dR\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # pre-calculate near far\n nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer, self.min_near)\n nears = nears.detach()\n fars = fars.detach()\n\n # encode audio\n enc_a = self.encode_audio(auds) # [1, 64]\n\n if enc_a is not None and self.smooth_lips:\n if self.enc_a is not None:\n _lambda = 0.35\n enc_a = _lambda * self.enc_a + (1 - _lambda) * enc_a\n self.enc_a = enc_a\n\n \n if self.individual_dim > 0:\n if self.training:\n ind_code = self.individual_codes[index]\n # use a fixed ind code for the unknown test data.\n else:\n ind_code = self.individual_codes[0]\n else:\n ind_code = None\n\n if self.training:\n # setup counter\n counter = self.step_counter[self.local_step % 16]\n counter.zero_() # set to 0\n self.local_step += 1\n\n xyzs, dirs, deltas, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, counter, self.mean_count, perturb, 128, force_all_rays, dt_gamma, max_steps)\n sigmas, rgbs, amb_aud, amb_eye, uncertainty = self(xyzs, dirs, enc_a, ind_code, eye)\n sigmas = self.density_scale * sigmas\n\n #print(f'valid RGB query ratio: {mask.sum().item() / mask.shape[0]} (total = {mask.sum().item()})')\n\n # weights_sum, ambient_sum, uncertainty_sum, depth, image = raymarching.composite_rays_train_uncertainty(sigmas, rgbs, ambient.abs().sum(-1), uncertainty, deltas, rays)\n weights_sum, amb_aud_sum, amb_eye_sum, uncertainty_sum, depth, image = raymarching.composite_rays_train_triplane(sigmas, rgbs, amb_aud.abs().sum(-1), amb_eye.abs().sum(-1), uncertainty, deltas, rays)\n\n # for training only\n results['weights_sum'] = weights_sum\n results['ambient_aud'] = amb_aud_sum\n results['ambient_eye'] = amb_eye_sum\n results['uncertainty'] = uncertainty_sum\n\n results['rays'] = xyzs, dirs, enc_a, ind_code, eye\n\n else:\n \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n amb_aud_sum = torch.zeros(N, dtype=dtype, device=device)\n amb_eye_sum = torch.zeros(N, dtype=dtype, device=device)\n uncertainty_sum = torch.zeros(N, dtype=dtype, device=device)\n\n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < max_steps:\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n \n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, deltas = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, 128, perturb if step == 0 else False, dt_gamma, max_steps)\n\n sigmas, rgbs, ambients_aud, ambients_eye, uncertainties = self(xyzs, dirs, enc_a, ind_code, eye)\n sigmas = self.density_scale * sigmas\n\n # raymarching.composite_rays_uncertainty(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, deltas, ambients, uncertainties, weights_sum, 
depth, image, ambient_sum, uncertainty_sum, T_thresh)\n raymarching.composite_rays_triplane(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, deltas, ambients_aud, ambients_eye, uncertainties, weights_sum, depth, image, amb_aud_sum, amb_eye_sum, uncertainty_sum, T_thresh)\n\n rays_alive = rays_alive[rays_alive >= 0]\n\n # print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n \n torso_results = self.run_torso(rays_o, bg_coords, poses, index, bg_color)\n bg_color = torso_results['bg_color']\n\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n image = image.view(*prefix, 3)\n image = image.clamp(0, 1)\n\n depth = torch.clamp(depth - nears, min=0) / (fars - nears)\n depth = depth.view(*prefix)\n\n amb_aud_sum = amb_aud_sum.view(*prefix)\n amb_eye_sum = amb_eye_sum.view(*prefix)\n\n results['depth'] = depth\n results['image'] = image # head_image if train, else com_image\n results['ambient_aud'] = amb_aud_sum\n results['ambient_eye'] = amb_eye_sum\n results['uncertainty'] = uncertainty_sum\n\n return results\n \n\n def run_torso(self, rays_o, bg_coords, poses, index=0, bg_color=None, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # auds: [B, 16]\n # index: [B]\n # return: image: [B, N, 3], depth: [B, N]\n\n rays_o = rays_o.contiguous().view(-1, 3)\n bg_coords = bg_coords.contiguous().view(-1, 2)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # background\n if bg_color is None:\n bg_color = 1\n\n # first mix torso with background\n if self.torso:\n # torso ind code\n if self.individual_dim_torso > 0:\n if self.training:\n ind_code_torso = self.individual_codes_torso[index]\n # use a fixed ind code for the unknown test data.\n else:\n ind_code_torso = self.individual_codes_torso[0]\n else:\n ind_code_torso = None\n \n # 2D density grid for acceleration...\n density_thresh_torso = min(self.density_thresh_torso, self.mean_density_torso)\n occupancy = F.grid_sample(self.density_grid_torso.view(1, 1, self.grid_size, self.grid_size), bg_coords.view(1, -1, 1, 2), align_corners=True).view(-1)\n mask = occupancy > density_thresh_torso\n\n # masked query of torso\n torso_alpha = torch.zeros([N, 1], device=device)\n torso_color = torch.zeros([N, 3], device=device)\n\n if mask.any():\n torso_alpha_mask, torso_color_mask, deform = self.forward_torso(bg_coords[mask], poses, ind_code_torso)\n\n torso_alpha[mask] = torso_alpha_mask.float()\n torso_color[mask] = torso_color_mask.float()\n\n results['deform'] = deform\n \n # first mix torso with background\n \n bg_color = torso_color * torso_alpha + bg_color * (1 - torso_alpha)\n\n results['torso_alpha'] = torso_alpha\n results['torso_color'] = bg_color\n\n # print(torso_alpha.shape, torso_alpha.max().item(), torso_alpha.min().item())\n \n results['bg_color'] = bg_color\n \n return results\n\n\n @torch.no_grad()\n def mark_untrained_grid(self, poses, intrinsic, S=64):\n # poses: [B, 4, 4]\n # intrinsic: [3, 3]\n\n if not self.cuda_ray:\n return\n \n if isinstance(poses, np.ndarray):\n poses = torch.from_numpy(poses)\n\n B = poses.shape[0]\n \n fx, fy, cx, cy = intrinsic\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n\n count = torch.zeros_like(self.density_grid)\n poses = 
poses.to(count.device)\n\n # 5-level loop, forgive me...\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n world_xyzs = (2 * coords.float() / (self.grid_size - 1) - 1).unsqueeze(0) # [1, N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_world_xyzs = world_xyzs * (bound - half_grid_size)\n\n # split batch to avoid OOM\n head = 0\n while head < B:\n tail = min(head + S, B)\n\n # world2cam transform (poses is c2w, so we need to transpose it. Another transpose is needed for batched matmul, so the final form is without transpose.)\n cam_xyzs = cas_world_xyzs - poses[head:tail, :3, 3].unsqueeze(1)\n cam_xyzs = cam_xyzs @ poses[head:tail, :3, :3] # [S, N, 3]\n \n # query if point is covered by any camera\n mask_z = cam_xyzs[:, :, 2] > 0 # [S, N]\n mask_x = torch.abs(cam_xyzs[:, :, 0]) < cx / fx * cam_xyzs[:, :, 2] + half_grid_size * 2\n mask_y = torch.abs(cam_xyzs[:, :, 1]) < cy / fy * cam_xyzs[:, :, 2] + half_grid_size * 2\n mask = (mask_z & mask_x & mask_y).sum(0).reshape(-1) # [N]\n\n # update count \n count[cas, indices] += mask\n head += S\n \n # mark untrained grid as -1\n self.density_grid[count == 0] = -1\n\n #print(f'[mark untrained grid] {(count == 0).sum()} from {resolution ** 3 * self.cascade}')\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not self.cuda_ray:\n return \n \n # use random auds (different expressions should have similar density grid...)\n rand_idx = random.randint(0, self.aud_features.shape[0] - 1)\n auds = get_audio_features(self.aud_features, self.att, rand_idx).to(self.density_bitfield.device)\n\n # encode audio\n enc_a = self.encode_audio(auds)\n\n ### update density grid\n if not self.torso: # forbid updating head if is training torso...\n\n tmp_grid = torch.zeros_like(self.density_grid)\n\n # use a random eye area based on training dataset's statistics...\n if self.exp_eye:\n eye = self.eye_area[[rand_idx]].to(self.density_bitfield.device) # [1, 1]\n else:\n eye = None\n \n # full update\n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs, enc_a, eye)['sigma'].reshape(-1).detach().to(tmp_grid.dtype)\n sigmas *= self.density_scale\n # assign \n tmp_grid[cas, indices] = 
sigmas\n \n # dilate the density_grid (less aggressive culling)\n tmp_grid = raymarching.morton3D_dilation(tmp_grid)\n\n # ema update\n valid_mask = (self.density_grid >= 0) & (tmp_grid >= 0)\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid.clamp(min=0)).item() # -1 non-training regions are viewed as 0 density.\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n\n ### update torso density grid\n if self.torso:\n tmp_grid_torso = torch.zeros_like(self.density_grid_torso)\n\n # random pose, random ind_code\n rand_idx = random.randint(0, self.poses.shape[0] - 1)\n # pose = convert_poses(self.poses[[rand_idx]]).to(self.density_bitfield.device)\n pose = self.poses[[rand_idx]].to(self.density_bitfield.device)\n\n if self.opt.ind_dim_torso > 0:\n ind_code = self.individual_codes_torso[[rand_idx]]\n else:\n ind_code = None\n\n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n\n half_grid_size = 1 / self.grid_size\n\n for xs in X:\n for ys in Y:\n xx, yy = custom_meshgrid(xs, ys)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1)], dim=-1) # [N, 2], in [0, 128)\n indices = (coords[:, 1] * self.grid_size + coords[:, 0]).long() # NOTE: xy transposed!\n xys = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 2] in [-1, 1]\n xys = xys * (1 - half_grid_size)\n # add noise in [-hgs, hgs]\n xys += (torch.rand_like(xys) * 2 - 1) * half_grid_size\n # query density\n alphas, _, _ = self.forward_torso(xys, pose, ind_code) # [N, 1]\n \n # assign \n tmp_grid_torso[indices] = alphas.squeeze(1).float()\n\n # dilate\n tmp_grid_torso = tmp_grid_torso.view(1, 1, self.grid_size, self.grid_size)\n # tmp_grid_torso = F.max_pool2d(tmp_grid_torso, kernel_size=3, stride=1, padding=1)\n tmp_grid_torso = F.max_pool2d(tmp_grid_torso, kernel_size=5, stride=1, padding=2)\n tmp_grid_torso = tmp_grid_torso.view(-1)\n \n self.density_grid_torso = torch.maximum(self.density_grid_torso * decay, tmp_grid_torso)\n self.mean_density_torso = torch.mean(self.density_grid_torso).item()\n\n # density_thresh_torso = min(self.density_thresh_torso, self.mean_density_torso)\n # print(f'[density grid torso] min={self.density_grid_torso.min().item():.4f}, max={self.density_grid_torso.max().item():.4f}, mean={self.mean_density_torso:.4f}, occ_rate={(self.density_grid_torso > density_thresh_torso).sum() / (128**2):.3f}')\n\n ### update step counter\n total_step = min(16, self.local_step)\n if total_step > 0:\n self.mean_count = int(self.step_counter[:total_step, 0].sum().item() / total_step)\n self.local_step = 0\n\n #print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > 0.01).sum() / (128**3 * self.cascade):.3f} | [step counter] mean={self.mean_count}')\n\n\n @torch.no_grad()\n def get_audio_grid(self, S=128):\n # call before each epoch to update extra states.\n\n if not self.cuda_ray:\n return \n \n # use random auds (different expressions should have similar density grid...)\n rand_idx = random.randint(0, self.aud_features.shape[0] - 1)\n auds = get_audio_features(self.aud_features, self.att, 
rand_idx).to(self.density_bitfield.device)\n\n # encode audio\n enc_a = self.encode_audio(auds)\n tmp_grid = torch.zeros_like(self.density_grid)\n\n # use a random eye area based on training dataset's statistics...\n if self.exp_eye:\n eye = self.eye_area[[rand_idx]].to(self.density_bitfield.device) # [1, 1]\n else:\n eye = None\n \n # full update\n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n aud_norms = self.density(cas_xyzs.to(tmp_grid.dtype), enc_a, eye)['ambient_aud'].reshape(-1).detach().to(tmp_grid.dtype)\n # assign \n tmp_grid[cas, indices] = aud_norms\n \n # dilate the density_grid (less aggressive culling)\n tmp_grid = raymarching.morton3D_dilation(tmp_grid)\n return tmp_grid\n # # ema update\n # valid_mask = (self.density_grid >= 0) & (tmp_grid >= 0)\n # self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n\n\n @torch.no_grad()\n def get_eye_grid(self, S=128):\n # call before each epoch to update extra states.\n\n if not self.cuda_ray:\n return \n \n # use random auds (different expressions should have similar density grid...)\n rand_idx = random.randint(0, self.aud_features.shape[0] - 1)\n auds = get_audio_features(self.aud_features, self.att, rand_idx).to(self.density_bitfield.device)\n\n # encode audio\n enc_a = self.encode_audio(auds)\n tmp_grid = torch.zeros_like(self.density_grid)\n\n # use a random eye area based on training dataset's statistics...\n if self.exp_eye:\n eye = self.eye_area[[rand_idx]].to(self.density_bitfield.device) # [1, 1]\n else:\n eye = None\n \n # full update\n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n eye_norms = self.density(cas_xyzs.to(tmp_grid.dtype), 
enc_a, eye)['ambient_eye'].reshape(-1).detach().to(tmp_grid.dtype)\n # assign \n tmp_grid[cas, indices] = eye_norms\n \n # dilate the density_grid (less aggressive culling)\n tmp_grid = raymarching.morton3D_dilation(tmp_grid)\n return tmp_grid\n # # ema update\n # valid_mask = (self.density_grid >= 0) & (tmp_grid >= 0)\n # self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n\n\n\n def render(self, rays_o, rays_d, auds, bg_coords, poses, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # auds: [B, 29, 16]\n # eye: [B, 1]\n # bg_coords: [1, N, 2]\n # return: pred_rgb: [B, N, 3]\n\n _run = self.run_cuda\n \n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n # never stage when cuda_ray\n if staged and not self.cuda_ray:\n # not used\n raise NotImplementedError\n\n else:\n results = _run(rays_o, rays_d, auds, bg_coords, poses, **kwargs)\n\n return results\n \n \n def render_torso(self, rays_o, rays_d, auds, bg_coords, poses, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # auds: [B, 29, 16]\n # eye: [B, 1]\n # bg_coords: [1, N, 2]\n # return: pred_rgb: [B, N, 3]\n\n _run = self.run_torso\n \n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n # never stage when cuda_ray\n if staged and not self.cuda_ray:\n # not used\n raise NotImplementedError\n\n else:\n results = _run(rays_o, bg_coords, poses, **kwargs)\n\n return results"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from encoding import get_encoder
from .renderer import NeRFRenderer | 9,303 |
# Audio feature attention (fuses a window of per-frame audio codes with learned weights)
class AudioAttNet(nn.Module):
def __init__(self, dim_aud=64, seq_len=8):
super(AudioAttNet, self).__init__()
self.seq_len = seq_len
self.dim_aud = dim_aud
self.attentionConvNet = nn.Sequential( # b x subspace_dim x seq_len
nn.Conv1d(self.dim_aud, 16, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(16, 8, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(8, 4, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(4, 2, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(2, 1, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True)
)
self.attentionNet = nn.Sequential(
nn.Linear(in_features=self.seq_len, out_features=self.seq_len, bias=True),
nn.Softmax(dim=1)
)
def forward(self, x):
# x: [1, seq_len, dim_aud]
y = x.permute(0, 2, 1) # [1, dim_aud, seq_len]
y = self.attentionConvNet(y)
y = self.attentionNet(y.view(1, self.seq_len)).view(1, self.seq_len, 1)
return torch.sum(y * x, dim=1) # [1, dim_aud]
# Audio feature extractor
class AudioNet(nn.Module):
def __init__(self, dim_in=29, dim_aud=64, win_size=16):
super(AudioNet, self).__init__()
self.win_size = win_size
self.dim_aud = dim_aud
self.encoder_conv = nn.Sequential( # n x 29 x 16
nn.Conv1d(dim_in, 32, kernel_size=3, stride=2, padding=1, bias=True), # n x 32 x 8
nn.LeakyReLU(0.02, True),
nn.Conv1d(32, 32, kernel_size=3, stride=2, padding=1, bias=True), # n x 32 x 4
nn.LeakyReLU(0.02, True),
nn.Conv1d(32, 64, kernel_size=3, stride=2, padding=1, bias=True), # n x 64 x 2
nn.LeakyReLU(0.02, True),
nn.Conv1d(64, 64, kernel_size=3, stride=2, padding=1, bias=True), # n x 64 x 1
nn.LeakyReLU(0.02, True),
)
self.encoder_fc1 = nn.Sequential(
nn.Linear(64, 64),
nn.LeakyReLU(0.02, True),
nn.Linear(64, dim_aud),
)
def forward(self, x):
half_w = int(self.win_size/2)
x = x[:, :, 8-half_w:8+half_w]
x = self.encoder_conv(x).squeeze(-1)
x = self.encoder_fc1(x)
return x
class MLP(nn.Module):
def __init__(self, dim_in, dim_out, dim_hidden, num_layers):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.dim_hidden = dim_hidden
self.num_layers = num_layers
net = []
for l in range(num_layers):
net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=False))
self.net = nn.ModuleList(net)
def forward(self, x):
for l in range(self.num_layers):
x = self.net[l](x)
if l != self.num_layers - 1:
x = F.relu(x, inplace=True)
# x = F.dropout(x, p=0.1, training=self.training)
return x
|
# Audio feature attention (fuses a window of per-frame audio codes with learned weights)
class AudioAttNet(nn.Module):
def __init__(self, dim_aud=64, seq_len=8):
super(AudioAttNet, self).__init__()
self.seq_len = seq_len
self.dim_aud = dim_aud
self.attentionConvNet = nn.Sequential( # b x subspace_dim x seq_len
nn.Conv1d(self.dim_aud, 16, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(16, 8, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(8, 4, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(4, 2, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(2, 1, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True)
)
self.attentionNet = nn.Sequential(
nn.Linear(in_features=self.seq_len, out_features=self.seq_len, bias=True),
nn.Softmax(dim=1)
)
def forward(self, x):
# x: [1, seq_len, dim_aud]
y = x.permute(0, 2, 1) # [1, dim_aud, seq_len]
y = self.attentionConvNet(y)
y = self.attentionNet(y.view(1, self.seq_len)).view(1, self.seq_len, 1)
return torch.sum(y * x, dim=1) # [1, dim_aud]
# Audio feature extractor
class AudioNet(nn.Module):
def __init__(self, dim_in=29, dim_aud=64, win_size=16):
super(AudioNet, self).__init__()
self.win_size = win_size
self.dim_aud = dim_aud
self.encoder_conv = nn.Sequential( # n x 29 x 16
nn.Conv1d(dim_in, 32, kernel_size=3, stride=2, padding=1, bias=True), # n x 32 x 8
nn.LeakyReLU(0.02, True),
nn.Conv1d(32, 32, kernel_size=3, stride=2, padding=1, bias=True), # n x 32 x 4
nn.LeakyReLU(0.02, True),
nn.Conv1d(32, 64, kernel_size=3, stride=2, padding=1, bias=True), # n x 64 x 2
nn.LeakyReLU(0.02, True),
nn.Conv1d(64, 64, kernel_size=3, stride=2, padding=1, bias=True), # n x 64 x 1
nn.LeakyReLU(0.02, True),
)
self.encoder_fc1 = nn.Sequential(
nn.Linear(64, 64),
nn.LeakyReLU(0.02, True),
nn.Linear(64, dim_aud),
)
def forward(self, x):
half_w = int(self.win_size/2)
x = x[:, :, 8-half_w:8+half_w]
x = self.encoder_conv(x).squeeze(-1)
x = self.encoder_fc1(x)
return x
class MLP(nn.Module):
def __init__(self, dim_in, dim_out, dim_hidden, num_layers):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.dim_hidden = dim_hidden
self.num_layers = num_layers
net = []
for l in range(num_layers):
net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=False))
self.net = nn.ModuleList(net)
def forward(self, x):
for l in range(self.num_layers):
x = self.net[l](x)
if l != self.num_layers - 1:
x = F.relu(x, inplace=True)
# x = F.dropout(x, p=0.1, training=self.training)
return x
| class NeRFNetwork(NeRFRenderer): | 1 | 2023-12-19 01:32:46+00:00 | 12k |
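The row above defines AudioNet, AudioAttNet and MLP but never shows them composed. A minimal shape-check sketch follows (illustrative only; it assumes the three classes above are in scope, and the tensor sizes are made up for the example):

import torch

# Illustrative only: instantiate the modules defined above with their default sizes.
aud_net = AudioNet(dim_in=29, dim_aud=64, win_size=16)      # per-window audio encoder
att_net = AudioAttNet(dim_aud=64, seq_len=8)                # attention over neighbouring windows
head = MLP(dim_in=64, dim_out=32, dim_hidden=128, num_layers=3)

feats = torch.randn(8, 29, 16)             # 8 audio-feature windows, each [29, 16]
codes = aud_net(feats)                     # -> [8, 64], one code per window
smoothed = att_net(codes.unsqueeze(0))     # -> [1, 64], attention-weighted sum over the 8 codes
out = head(smoothed)                       # -> [1, 32]
print(codes.shape, smoothed.shape, out.shape)

The attention module expects its seq_len to match the number of stacked window codes, hence the unsqueeze to a batch of one before smoothing.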
MingtaoGuo/AnimateAnyone_unofficial | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, 
on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 9,741 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key) | count_params(self.model, verbose=True) | 6 | 2023-12-16 03:31:33+00:00 | 12k |
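For orientation, the gold next line of the record above is count_params(self.model, verbose=True), with count_params imported from ldm.util in the record's import block. A minimal sketch of such a parameter counter (an assumption about what that helper does, not its verbatim source) could look like:

import torch.nn as nn

def count_params(model: nn.Module, verbose: bool = False) -> int:
    # Sum the number of elements over every parameter tensor in the module.
    total = sum(p.numel() for p in model.parameters())
    if verbose:
        print(f"{model.__class__.__name__} has {total * 1.e-6:.2f} M params.")
    return total

# Toy usage just to exercise the helper: a single 768 -> 512 Linear layer,
# roughly 0.39 M parameters.
count_params(nn.Linear(768, 512), verbose=True)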
modelscope/scepter | scepter/modules/model/backbone/image/vit_modify.py | [
{
"identifier": "MULTI_HEAD_VIT_MODEL",
"path": "scepter/modules/model/backbone/image/utils/vit.py",
"snippet": "class MULTI_HEAD_VIT_MODEL(nn.Module):\n para_dict = {\n 'INPUT_RESOLUTION': {\n 'value': 224,\n 'description': 'The input resolution of vit model!'\n },\n 'PATCH_SIZE': {\n 'value': 32,\n 'description': 'The patch size of vit model!'\n },\n 'WIDTH': {\n 'value': 768,\n 'description': 'The input embbeding dimention!'\n },\n 'OUTPUT_DIM': {\n 'value': 512,\n 'description': 'The output embbeding dimention!'\n },\n 'LAYERS': {\n 'value': 12,\n 'description': \"All of the vit model's layers!\"\n },\n 'FROZEN_LAYERS': {\n 'value': 6,\n 'description': 'The frozen layers number!'\n },\n 'FT_LAYERS': {\n 'value': 6,\n 'description': 'The finetune layers number!'\n },\n 'MULTI_HEAD': {\n 'value': 2,\n 'description': 'The head number of vit!'\n },\n 'HEADS': {\n 'value': 12,\n 'description': 'The head number of transformer!'\n }\n }\n\n def __init__(self, cfg):\n super().__init__()\n input_resolution = cfg.INPUT_RESOLUTION\n patch_size = cfg.PATCH_SIZE\n width = cfg.WIDTH\n output_dim = cfg.OUTPUT_DIM\n frozen_layers = cfg.FROZEN_LAYERS\n ft_layers = cfg.FT_LAYERS\n self.multi_head = cfg.MULTI_HEAD\n heads = cfg.HEADS\n\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3,\n out_channels=width,\n kernel_size=patch_size,\n stride=patch_size,\n bias=False)\n\n scale = width**-0.5\n self.class_embedding = nn.Parameter(scale *\n torch.randn(width)) # [768]\n self.positional_embedding = nn.Parameter(scale * torch.randn(\n (input_resolution // patch_size)**2 + 1, width)) # [50, 768]\n self.ln_pre = LayerNorm(width)\n\n self.frozen_transformer = FrozenTransformer(width, frozen_layers,\n heads)\n self.frozen_transformer.eval()\n\n if self.multi_head == 2:\n self.ft_transformer_1 = Transformer(width, ft_layers, heads)\n self.ln_post_1 = LayerNorm(width)\n self.proj_1 = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.ft_transformer_2 = Transformer(width, ft_layers, heads)\n self.ln_post_2 = LayerNorm(width)\n self.proj_2 = nn.Parameter(scale * torch.randn(width, output_dim))\n elif self.multi_head == 3:\n self.ft_transformer_1 = Transformer(width, ft_layers, heads)\n self.ln_post_1 = LayerNorm(width)\n self.proj_1 = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.ft_transformer_2 = Transformer(width, ft_layers, heads)\n self.ln_post_2 = LayerNorm(width)\n self.proj_2 = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.ft_transformer_3 = Transformer(width, ft_layers, heads)\n self.ln_post_3 = LayerNorm(width)\n self.proj_3 = nn.Parameter(scale * torch.randn(width, output_dim))\n elif self.multi_head == 4:\n self.ft_transformer_1 = Transformer(width, ft_layers, heads)\n self.ln_post_1 = LayerNorm(width)\n self.proj_1 = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.ft_transformer_2 = Transformer(width, ft_layers, heads)\n self.ln_post_2 = LayerNorm(width)\n self.proj_2 = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.ft_transformer_3 = Transformer(width, ft_layers, heads)\n self.ln_post_3 = LayerNorm(width)\n self.proj_3 = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.ft_transformer_4 = Transformer(width, ft_layers, heads)\n self.ln_post_4 = LayerNorm(width)\n self.proj_4 = nn.Parameter(scale * torch.randn(width, output_dim))\n\n def forward(self, x: torch.Tensor):\n with torch.no_grad():\n x = self.conv1(\n x) # shape = [*, width, grid, grid] -> [1, 768, 7, 7]\n x = x.reshape(\n x.shape[0], x.shape[1],\n -1) # shape = [*, width, grid ** 2] -> [1, 768, 49] 
49token\n x = x.permute(0, 2,\n 1) # shape = [*, grid ** 2, width] -> [1, 49, 768]\n x = torch.cat([\n self.class_embedding.to(x.dtype) + torch.zeros(\n x.shape[0], 1, x.shape[-1], dtype=x.dtype,\n device=x.device), x\n ],\n dim=1\n ) # shape = [*, grid ** 2 + 1, width]-> [1, 50, 768]\n x = x + self.positional_embedding.to(x.dtype) # [1, 50, 768]\n x = self.ln_pre(x)\n x = x.permute(1, 0, 2) # NLD -> LND [50, 1, 768]\n x = self.frozen_transformer(x)\n\n if self.multi_head == 2:\n sub_x1 = self.ft_transformer_1(x)\n sub_x1 = sub_x1.permute(1, 0, 2) # LND -> NLD\n sub_x1 = self.ln_post_1(sub_x1[:, 0, :])\n sub_x1 = sub_x1 @ self.proj_1\n\n sub_x2 = self.ft_transformer_2(x)\n sub_x2 = sub_x2.permute(1, 0, 2) # LND -> NLD\n sub_x2 = self.ln_post_2(sub_x2[:, 0, :])\n sub_x2 = sub_x2 @ self.proj_2\n\n return sub_x1, sub_x2\n elif self.multi_head == 3:\n sub_x1 = self.ft_transformer_1(x)\n sub_x1 = sub_x1.permute(1, 0, 2) # LND -> NLD\n sub_x1 = self.ln_post_1(sub_x1[:, 0, :])\n sub_x1 = sub_x1 @ self.proj_1\n\n sub_x2 = self.ft_transformer_2(x)\n sub_x2 = sub_x2.permute(1, 0, 2) # LND -> NLD\n sub_x2 = self.ln_post_2(sub_x2[:, 0, :])\n sub_x2 = sub_x2 @ self.proj_2\n\n sub_x3 = self.ft_transformer_3(x)\n sub_x3 = sub_x3.permute(1, 0, 2) # LND -> NLD\n sub_x3 = self.ln_post_3(sub_x3[:, 0, :])\n sub_x3 = sub_x3 @ self.proj_3\n\n return sub_x1, sub_x2, sub_x3\n elif self.multi_head == 4:\n sub_x1 = self.ft_transformer_1(x)\n sub_x1 = sub_x1.permute(1, 0, 2) # LND -> NLD\n sub_x1 = self.ln_post_1(sub_x1[:, 0, :])\n sub_x1 = sub_x1 @ self.proj_1\n\n sub_x2 = self.ft_transformer_2(x)\n sub_x2 = sub_x2.permute(1, 0, 2) # LND -> NLD\n sub_x2 = self.ln_post_2(sub_x2[:, 0, :])\n sub_x2 = sub_x2 @ self.proj_2\n\n sub_x3 = self.ft_transformer_3(x)\n sub_x3 = sub_x3.permute(1, 0, 2) # LND -> NLD\n sub_x3 = self.ln_post_3(sub_x3[:, 0, :])\n sub_x3 = sub_x3 @ self.proj_3\n\n sub_x4 = self.ft_transformer_4(x)\n sub_x4 = sub_x4.permute(1, 0, 2) # LND -> NLD\n sub_x4 = self.ln_post_4(sub_x4[:, 0, :])\n sub_x4 = sub_x4 @ self.proj_4\n\n return sub_x1, sub_x2, sub_x3, sub_x4"
},
{
"identifier": "VIT",
"path": "scepter/modules/model/backbone/image/utils/vit.py",
"snippet": "class VIT(nn.Module):\n para_dict = {\n 'INPUT_RESOLUTION': {\n 'value': 224,\n 'description': 'The input resolution of vit model!'\n },\n 'PATCH_SIZE': {\n 'value': 32,\n 'description': 'The patch size of vit model!'\n },\n 'WIDTH': {\n 'value': 768,\n 'description': 'The input embbeding dimention!'\n },\n 'OUTPUT_DIM': {\n 'value': 512,\n 'description': 'The output embbeding dimention!'\n },\n 'LAYERS': {\n 'value': 12,\n 'description': \"Model's all layers num!\"\n },\n 'HEADS': {\n 'value': 12,\n 'description': 'The head number of transformer!'\n },\n 'EXPORT': {\n 'value': False,\n 'description': 'Whether export model or not!'\n },\n 'TOKEN_WISE': {\n 'value': False,\n 'description': 'Whether output token wise feature or not!'\n }\n }\n\n def __init__(self, cfg, logger=None):\n super().__init__()\n input_resolution = cfg.INPUT_RESOLUTION\n width = cfg.WIDTH\n patch_size = cfg.PATCH_SIZE\n layers = cfg.LAYERS\n heads = cfg.HEADS\n output_dim = cfg.OUTPUT_DIM\n use_proj = cfg.get('USE_PROJ', True)\n self.export = cfg.get('EXPORT', False)\n self.token_wise = cfg.get('TOKEN_WISE', False)\n self.conv1 = nn.Conv2d(in_channels=3,\n out_channels=width,\n kernel_size=patch_size,\n stride=patch_size,\n bias=False)\n scale = width**-0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn(\n (input_resolution // patch_size)**2 + 1, width))\n self.ln_pre = LayerNorm(width)\n self.transformer = Transformer(width, layers, heads)\n\n self.ln_post = LayerNorm(width)\n if use_proj:\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n else:\n self.proj = None\n\n @property\n def dtype(self):\n return self.conv1.weight.dtype\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x.type(self.dtype)) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1],\n -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n # x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0],\n # 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if self.export:\n x = torch.cat([\n self.class_embedding.to(x.dtype).view(x.shape[0], 1,\n x.shape[-1]), x\n ],\n dim=1) # shape = [*, grid ** 2 + 1, width]\n else:\n x = torch.cat([\n self.class_embedding.to(x.dtype) + torch.zeros(\n x.shape[0], 1, x.shape[-1], dtype=x.dtype,\n device=x.device), x\n ],\n dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.positional_embedding.to(x.dtype)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n if self.token_wise:\n return self.ln_post(x)\n x = self.ln_post(x[:, 0, :])\n if not self.export:\n if self.proj is not None:\n x = x @ self.proj\n return x\n else:\n before_proj = x\n if self.proj is not None:\n x = before_proj @ self.proj\n return before_proj, x"
},
{
"identifier": "VIT_MODEL",
"path": "scepter/modules/model/backbone/image/utils/vit.py",
"snippet": "class VIT_MODEL(nn.Module):\n '''\n INPUT_RESOLUTION: 224\n PATCH_SIZE: 32\n WIDTH: 768\n OUTPUT_DIM: 512\n LAYERS: 12\n HEADS: 12\n '''\n para_dict = {\n 'INPUT_RESOLUTION': {\n 'value': 224,\n 'description': 'The input resolution of vit model!'\n },\n 'PATCH_SIZE': {\n 'value': 32,\n 'description': 'The patch size of vit model!'\n },\n 'WIDTH': {\n 'value': 768,\n 'description': 'The input embbeding dimention!'\n },\n 'OUTPUT_DIM': {\n 'value': 512,\n 'description': 'The output embbeding dimention!'\n },\n 'FROZEN_LAYERS': {\n 'value': 6,\n 'description': \"Frozen model's layers num!\"\n },\n 'FT_LAYERS': {\n 'value': 6,\n 'description': \"Finetune model's layers num!\"\n },\n 'HEADS': {\n 'value': 12,\n 'description': 'The head number of transformer!'\n }\n }\n\n def __init__(self, cfg):\n super().__init__()\n input_resolution = cfg.INPUT_RESOLUTION\n patch_size = cfg.PATCH_SIZE\n width = cfg.WIDTH\n output_dim = cfg.OUTPUT_DIM\n frozen_layers = cfg.FROZEN_LAYERS\n ft_layers = cfg.FT_LAYERS\n heads = cfg.HEADS\n\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3,\n out_channels=width,\n kernel_size=patch_size,\n stride=patch_size,\n bias=False)\n\n scale = width**-0.5\n self.class_embedding = nn.Parameter(scale *\n torch.randn(width)) # [768]\n self.positional_embedding = nn.Parameter(scale * torch.randn(\n (input_resolution // patch_size)**2 + 1, width)) # [50, 768]\n self.ln_pre = LayerNorm(width)\n\n self.frozen_transformer = FrozenTransformer(width, frozen_layers,\n heads)\n self.frozen_transformer.eval()\n\n self.ft_transformer = Transformer(width, ft_layers, heads)\n\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n def forward(self, x: torch.Tensor):\n with torch.no_grad():\n x = self.conv1(\n x) # shape = [*, width, grid, grid] -> [1, 768, 7, 7]\n x = x.reshape(\n x.shape[0], x.shape[1],\n -1) # shape = [*, width, grid ** 2] -> [1, 768, 49] 49token\n x = x.permute(0, 2,\n 1) # shape = [*, grid ** 2, width] -> [1, 49, 768]\n x = torch.cat([\n self.class_embedding.to(x.dtype) + torch.zeros(\n x.shape[0], 1, x.shape[-1], dtype=x.dtype,\n device=x.device), x\n ],\n dim=1\n ) # shape = [*, grid ** 2 + 1, width]-> [1, 50, 768]\n x = x + self.positional_embedding.to(x.dtype) # [1, 50, 768]\n x = self.ln_pre(x)\n x = x.permute(1, 0, 2) # NLD -> LND [50, 1, 768]\n x = self.frozen_transformer(x)\n x = self.ft_transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n x = self.ln_post(x[:, 0, :])\n\n if self.proj is not None:\n x = x @ self.proj\n\n return x"
},
{
"identifier": "MULTI_HEAD_VIT_MODEL_Split",
"path": "scepter/modules/model/backbone/image/utils/vit.py",
"snippet": "class MULTI_HEAD_VIT_MODEL_Split(nn.Module):\n para_dict = {\n 'INPUT_RESOLUTION': {\n 'value': 224,\n 'description': 'The input resolution of vit model!'\n },\n 'PATCH_SIZE': {\n 'value': 32,\n 'description': 'The patch size of vit model!'\n },\n 'WIDTH': {\n 'value': 768,\n 'description': 'The input embbeding dimention!'\n },\n 'OUTPUT_DIM': {\n 'value': 512,\n 'description': 'The output embbeding dimention!'\n },\n 'LAYERS': {\n 'value': 12,\n 'description': \"All of the vit model's layers!\"\n },\n 'FROZEN_LAYERS': {\n 'value': 6,\n 'description': 'The frozen layers number!'\n },\n 'FT_LAYERS': {\n 'value': 6,\n 'description': 'The finetune layers number!'\n },\n 'PART': {\n 'value': 'backbone',\n 'description': 'The part name of vit!'\n },\n 'HEADS': {\n 'value': 12,\n 'description': 'The head number of transformer!'\n }\n }\n\n def __init__(self, cfg):\n super().__init__()\n input_resolution = cfg.INPUT_RESOLUTION\n patch_size = cfg.PATCH_SIZE\n width = cfg.WIDTH\n output_dim = cfg.OUTPUT_DIM\n frozen_layers = cfg.FROZEN_LAYERS\n ft_layers = cfg.FT_LAYERS\n heads = cfg.HEADS\n self.PART = cfg.PART\n scale = width**-0.5\n if self.PART == 'backbone':\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3,\n out_channels=width,\n kernel_size=patch_size,\n stride=patch_size,\n bias=False)\n self.class_embedding = nn.Parameter(scale *\n torch.randn(width)) # [768]\n self.positional_embedding = nn.Parameter(scale * torch.randn(\n (input_resolution // patch_size)**2 + 1, width)) # [50, 768]\n self.ln_pre = LayerNorm(width)\n self.frozen_transformer = FrozenTransformer(\n width, frozen_layers, heads)\n self.frozen_transformer.eval()\n else:\n self.ft_transformer = Transformer(width, ft_layers, heads)\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n def forward(self, x: torch.Tensor):\n if self.PART == 'backbone':\n with torch.no_grad():\n x = self.conv1(\n x) # shape = [*, width, grid, grid] -> [1, 768, 7, 7]\n x = x.reshape(\n x.shape[0], x.shape[1], -1\n ) # shape = [*, width, grid ** 2] -> [1, 768, 49] 49token\n x = x.permute(\n 0, 2, 1) # shape = [*, grid ** 2, width] -> [1, 49, 768]\n x = torch.cat(\n [\n self.class_embedding.to(x.dtype) +\n torch.zeros(x.shape[0],\n 1,\n x.shape[-1],\n dtype=x.dtype,\n device=x.device), x\n ],\n dim=1) # shape = [*, grid ** 2 + 1, width]-> [1, 50, 768]\n x = x + self.positional_embedding.to(x.dtype) # [1, 50, 768]\n x = self.ln_pre(x)\n x = x.permute(1, 0, 2) # NLD -> LND [50, 1, 768]\n x = self.frozen_transformer(x)\n return x\n else:\n sub_x = self.ft_transformer(x)\n sub_x = sub_x.permute(1, 0, 2) # LND -> NLD\n sub_x = self.ln_post(sub_x[:, 0, :])\n sub_x = sub_x @ self.proj\n return sub_x"
},
{
"identifier": "BaseModel",
"path": "scepter/modules/model/base_model.py",
"snippet": "class BaseModel(nn.Module):\n para_dict = {\n 'PRETRAINED_MODEL': {\n 'value': None,\n 'description': 'Pretrained model path.'\n }\n }\n\n def __init__(self, cfg, logger=None):\n super(BaseModel, self).__init__()\n self.logger = logger\n self.cfg = cfg\n self._probe_data = {}\n self._dist_data = {}\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}' + ' ' + super().__repr__()\n\n def load_pretrained_model(self, pretrained_model):\n pass\n\n def register_probe(self, probe_data: dict):\n probe_da, dist_da = register_data(probe_data,\n key_prefix=__class__.__name__)\n self._probe_data.update(probe_da)\n for key in dist_da:\n if key not in self._dist_data:\n self._dist_data[key] = dist_da[key]\n else:\n for k, v in dist_da[key].items():\n if k in self._dist_data[key]:\n self._dist_data[key][k] += v\n else:\n self._dist_data[key][k] = v\n\n def probe_data(self):\n gather_probe_data = gather_data(self._probe_data)\n _dist_data_list = gather_data([self._dist_data])\n if not we.rank == 0:\n self._probe_data = {}\n self._dist_data = {}\n # Iterate recurse the sub class's probe data for time-aware data.\n for k, v in self._modules.items():\n if isinstance(getattr(self, k), BaseModel):\n for kk, vv in getattr(self, k).probe_data().items():\n self._probe_data[f'{k}/{kk}'] = vv\n\n if gather_probe_data is not None:\n # Before processing, just merge the data.\n self._probe_data = merge_gathered_probe(gather_probe_data)\n reduce_dist_data = {}\n if _dist_data_list is not None:\n reduce_dist_data = {}\n for one_data in _dist_data_list:\n for k, v in one_data.items():\n if k in reduce_dist_data:\n for kk, vv in v.items():\n if kk in reduce_dist_data[k]:\n reduce_dist_data[k][kk] += vv\n else:\n reduce_dist_data[k][kk] = vv\n else:\n reduce_dist_data[k] = v\n self._dist_data = reduce_dist_data\n # Iterate recurse the sub class's probe data for reduce data.\n self._probe_data[f'{__class__.__name__}_distribute'] = ProbeData(\n self._dist_data)\n norm_dist_data = {}\n for key, value in self._dist_data.items():\n total = 0\n for k, v in value.items():\n total += v\n norm_v = {}\n for k, v in value.items():\n norm_v[k] = v / total\n norm_dist_data[key] = norm_v\n self._probe_data[f'{__class__.__name__}_norm_distribute'] = ProbeData(\n norm_dist_data)\n ret_data = copy.deepcopy(self._probe_data)\n self._probe_data = {}\n return ret_data\n\n def clear_probe(self):\n self._probe_data.clear()\n\n @staticmethod\n def get_config_template():\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n :return:\n '''\n return dict_to_yaml('MODELS',\n __class__.__name__,\n BaseModel.para_dict,\n set_name=True)"
},
{
"identifier": "BACKBONES",
"path": "scepter/modules/model/registry.py",
"snippet": "BACKBONES = Registry('BACKBONES', build_func=build_model)"
},
{
"identifier": "dict_to_yaml",
"path": "scepter/modules/utils/config.py",
"snippet": "def dict_to_yaml(module_name, name, json_config, set_name=False):\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n convert std dict to yaml\n :param module_name:\n :param json_config:\n :return:\n '''\n def convert_yaml_style(level=1,\n name='ENV',\n description='ENV PARA',\n default='',\n type_name='',\n is_sys=False):\n new_line = ''\n new_line += '{}# {} DESCRIPTION: {} TYPE: {} default: {}\\n'.format(\n '\\t' * (level - 1), name.upper(), description, type_name,\n f'\\'{default}\\'' if isinstance(default, str) else default)\n if is_sys:\n if name == '-':\n new_line += '{}{}\\n'.format('\\t' * (level - 1), name.upper())\n else:\n new_line += '{}{}:\\n'.format('\\t' * (level - 1), name.upper())\n else:\n # if isinstance(default, str):\n # default = f'\\'{default}\\''\n if default is None:\n new_line += '{}# {}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n else:\n new_line += '{}{}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n return new_line\n\n def parse_dict(json_config,\n level_num,\n parent_key,\n set_name=False,\n name='',\n parent_type='dict'):\n yaml_str = ''\n # print(level_num, json_config)\n if isinstance(json_config, dict):\n if 'value' in json_config:\n value = json_config['value']\n if isinstance(value, dict):\n assert len(value) < 1\n value = None\n description = json_config.get('description', '')\n yaml_str += convert_yaml_style(level=level_num - 1,\n name=parent_key,\n description=description,\n default=value,\n type_name=type(value).__name__)\n return True, yaml_str\n else:\n if len(json_config) < 1:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default='',\n type_name='')\n level_num += 1\n for k, v in json_config.items():\n if k == 'description':\n continue\n if isinstance(v, dict):\n is_final, new_yaml_str = parse_dict(v,\n level_num,\n k,\n parent_type='dict')\n if not is_final and parent_type == 'dict':\n description = v.get('description', '')\n yaml_str += convert_yaml_style(\n level=level_num - 1,\n name=k,\n description=description,\n default='',\n type_name='',\n is_sys=True)\n if not is_final and parent_type == 'list':\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=k,\n type_name='')\n yaml_str += new_yaml_str\n elif isinstance(v, list):\n base_yaml_str = convert_yaml_style(level=level_num - 1,\n name=k,\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += base_yaml_str\n for tup in v:\n is_final, new_yaml_str = parse_dict(\n tup, level_num, '-', parent_type='list')\n if not is_final:\n yaml_str += convert_yaml_style(level=level_num,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += new_yaml_str\n else:\n raise KeyError(\n f'json config {json_config} must be a dict of list'\n )\n\n elif isinstance(json_config, list):\n level_num += 1\n for tup in json_config:\n is_final, new_yaml_str = parse_dict(tup, level_num, '-')\n if not is_final:\n\n yaml_str += convert_yaml_style(level=level_num - 1,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n if set_name:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n raise KeyError(f'json config {json_config} must be a dict')\n return False, yaml_str\n\n if isinstance(json_config, dict):\n first_dict, sec_dict, third_dict = {}, {}, {}\n for key, 
value in json_config.items():\n if isinstance(value, dict) and len(value) > 0:\n first_dict[key] = value\n elif isinstance(value, dict) and len(value) == 0:\n sec_dict[key] = value\n elif isinstance(value, list):\n third_dict[key] = value\n else:\n raise f'Config {json_config} is illegal'\n json_config = {}\n json_config.update(first_dict)\n json_config.update(sec_dict)\n json_config.update(third_dict)\n\n yaml_str = f'[{module_name}] module yaml examples:\\n'\n level_num = 1\n base_yaml_str = convert_yaml_style(level=level_num,\n name=module_name,\n description='',\n default='',\n type_name='',\n is_sys=True)\n level_num += 1\n\n is_final, new_yaml_str = parse_dict(json_config,\n level_num,\n module_name,\n set_name=isinstance(json_config, list)\n and set_name,\n name=name)\n if not is_final:\n yaml_str += base_yaml_str\n if set_name and not isinstance(json_config, list):\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n yaml_str += new_yaml_str[1:]\n\n return yaml_str"
},
{
"identifier": "FS",
"path": "scepter/modules/utils/file_system.py",
"snippet": "FS = FileSystem()"
}
] | import torch
import torch.nn as nn
from scepter.modules.model.backbone.image.utils.vit import (
MULTI_HEAD_VIT_MODEL, VIT, VIT_MODEL, MULTI_HEAD_VIT_MODEL_Split)
from scepter.modules.model.base_model import BaseModel
from scepter.modules.model.registry import BACKBONES
from scepter.modules.utils.config import dict_to_yaml
from scepter.modules.utils.file_system import FS | 9,573 | 'description': 'The frozen layers number!'
},
'FT_LAYERS': {
'value': 6,
'description': 'The finetune layers number!'
}
}
para_dict.update(VIT_MODEL.para_dict)
def __init__(self, cfg, logger=None):
super().__init__(cfg, logger=logger)
self.pretrain_path = cfg.PRETRAIN_PATH
self.pretrained = cfg.PRETRAINED
self.visual = VIT_MODEL(cfg)
self.frozen_layers = cfg.FROZEN_LAYERS
self.ft_layers = cfg.FT_LAYERS
if self.pretrained:
with FS.get_from(self.pretrain_path,
wait_finish=True) as local_file:
logger.info(f'Loading checkpoint from {self.pretrain_path}')
visual_pre = torch.load(local_file, map_location='cpu')
state_dict_update = self.reformat_state_dict(visual_pre)
self.visual.load_state_dict(state_dict_update, strict=True)
def reformat_state_dict(self, state_dict):
state_dict_update = {}
for k, v in state_dict.items():
if 'transformer.resblocks.' in k:
if int(k.split('.')[2]) < self.frozen_layers:
state_dict_update[k.replace(
'transformer.resblocks',
'frozen_transformer.resblocks')] = v
else:
new_k = k.replace('transformer.resblocks',
'ft_transformer.resblocks')
k_tups = new_k.split('.')
k_tups[2] = str(int(k_tups[2]) - self.frozen_layers)
new_k = '.'.join(k_tups)
state_dict_update[new_k] = v
else:
state_dict_update[k] = v
return state_dict_update
def forward(self, x):
out = self.visual.forward(x)
return out
@staticmethod
def get_config_template():
'''
{ "ENV" :
{ "description" : "",
"A" : {
"value": 1.0,
"description": ""
}
}
}
:return:
'''
return dict_to_yaml('BACKBONES',
__class__.__name__,
SomeFTVisualTransformer.para_dict,
set_name=True)
@BACKBONES.register_class()
class MultiHeadSomeFTVisualTransformer(BaseModel):
'''
INPUT_RESOLUTION: 224
PATCH_SIZE: 32
WIDTH: 768
OUTPUT_DIM: 512
LAYERS: 12
HEADS: 12
'''
para_dict = {}
para_dict.update(MULTI_HEAD_VIT_MODEL.para_dict)
def __init__(self, cfg, logger=None):
super().__init__(cfg, logger=logger)
self.visual = MULTI_HEAD_VIT_MODEL(cfg)
self.multi_head = cfg.MULTI_HEAD
self.frozen_layers = cfg.FROZEN_LAYERS
self.ft_layers = cfg.FT_LAYERS
def forward(self, x):
out = self.visual.forward(x)
return out
@staticmethod
def get_config_template():
'''
{ "ENV" :
{ "description" : "",
"A" : {
"value": 1.0,
"description": ""
}
}
}
:return:
'''
return dict_to_yaml('BACKBONES',
__class__.__name__,
MultiHeadSomeFTVisualTransformer.para_dict,
set_name=True)
@BACKBONES.register_class()
class SomeFTVisualTransformerTwoPart(BaseModel):
'''
INPUT_RESOLUTION: 224
PATCH_SIZE: 32
WIDTH: 768
OUTPUT_DIM: 512
LAYERS: 12
HEADS: 12
'''
para_dict = {}
| # -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(layer):
if isinstance(layer, (nn.Conv1d, nn.Conv2d, nn.Linear)):
layer.weight.data = layer.weight.data.half()
if layer.bias is not None:
layer.bias.data = layer.bias.data.half()
if isinstance(layer, nn.MultiheadAttention):
for attr in [
*[f'{s}_proj_weight' for s in ['in', 'q', 'k', 'v']],
'in_proj_bias', 'bias_k', 'bias_v'
]:
tensor = getattr(layer, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ['text_projection', 'proj']:
if hasattr(layer, name):
attr = getattr(layer, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
@BACKBONES.register_class()
class VisualTransformer(BaseModel):
'''
B/16: Input 224 Patch-size 16 Layers 12 Heads 12 WIDTH 768
B/32: Input 224 Patch-size 32 Layers 12 Heads 12 WIDTH 768
L/16: Input 224/336 Patch-size 16 Layers 24 Heads 16 WIDTH 1024
L/14: Input 224/336 Patch-size 14 Layers 24 Heads 16 WIDTH 1024
L/32: Input 224 Patch-size 32 Layers 24 Heads 16 WIDTH 1024
H/14: Input ...
INPUT_RESOLUTION: 224
PATCH_SIZE: 32
WIDTH: 768
OUTPUT_DIM: 512
LAYERS: 12
HEADS: 12
'''
para_dict = {
'PRETRAIN_PATH': {
'value': '',
'description': 'The file path of pretrained model!'
},
'PRETRAINED': {
'value': True,
'description': 'Use the pretrained model or not!'
}
}
para_dict.update(VIT.para_dict)
def __init__(self, cfg, logger=None):
super().__init__(cfg, logger=logger)
self.pretrain_path = cfg.PRETRAIN_PATH
self.pretrained = cfg.PRETRAINED
self.visual = VIT(cfg)
use_proj = cfg.get('USE_PROJ', True)
if self.pretrained:
with FS.get_from(self.pretrain_path,
wait_finish=True) as local_file:
logger.info(f'Loading checkpoint from {self.pretrain_path}')
visual_pre = torch.load(local_file, map_location='cpu')
if not use_proj:
visual_pre.pop('proj')
if visual_pre['conv1.weight'].dtype == torch.float16:
convert_weights(self.visual)
self.visual.load_state_dict(visual_pre, strict=True)
def forward(self, x):
out = self.visual.forward(x)
return out
@staticmethod
def get_config_template():
'''
{ "ENV" :
{ "description" : "",
"A" : {
"value": 1.0,
"description": ""
}
}
}
:return:
'''
return dict_to_yaml('BACKBONES',
__class__.__name__,
VisualTransformer.para_dict,
set_name=True)
@BACKBONES.register_class()
class SomeFTVisualTransformer(BaseModel):
'''
INPUT_RESOLUTION: 224
PATCH_SIZE: 32
WIDTH: 768
OUTPUT_DIM: 512
LAYERS: 12
HEADS: 12
'''
para_dict = {
'PRETRAIN_PATH': {
'value': '',
'description': 'The file path of pretrained model!'
},
'PRETRAINED': {
'value': True,
'description': 'Use the pretrained model or not!'
},
'FROZEN_LAYERS': {
'value': 6,
'description': 'The frozen layers number!'
},
'FT_LAYERS': {
'value': 6,
'description': 'The finetune layers number!'
}
}
para_dict.update(VIT_MODEL.para_dict)
def __init__(self, cfg, logger=None):
super().__init__(cfg, logger=logger)
self.pretrain_path = cfg.PRETRAIN_PATH
self.pretrained = cfg.PRETRAINED
self.visual = VIT_MODEL(cfg)
self.frozen_layers = cfg.FROZEN_LAYERS
self.ft_layers = cfg.FT_LAYERS
if self.pretrained:
with FS.get_from(self.pretrain_path,
wait_finish=True) as local_file:
logger.info(f'Loading checkpoint from {self.pretrain_path}')
visual_pre = torch.load(local_file, map_location='cpu')
state_dict_update = self.reformat_state_dict(visual_pre)
self.visual.load_state_dict(state_dict_update, strict=True)
def reformat_state_dict(self, state_dict):
state_dict_update = {}
for k, v in state_dict.items():
if 'transformer.resblocks.' in k:
if int(k.split('.')[2]) < self.frozen_layers:
state_dict_update[k.replace(
'transformer.resblocks',
'frozen_transformer.resblocks')] = v
else:
new_k = k.replace('transformer.resblocks',
'ft_transformer.resblocks')
k_tups = new_k.split('.')
k_tups[2] = str(int(k_tups[2]) - self.frozen_layers)
new_k = '.'.join(k_tups)
state_dict_update[new_k] = v
else:
state_dict_update[k] = v
return state_dict_update
def forward(self, x):
out = self.visual.forward(x)
return out
@staticmethod
def get_config_template():
'''
{ "ENV" :
{ "description" : "",
"A" : {
"value": 1.0,
"description": ""
}
}
}
:return:
'''
return dict_to_yaml('BACKBONES',
__class__.__name__,
SomeFTVisualTransformer.para_dict,
set_name=True)
@BACKBONES.register_class()
class MultiHeadSomeFTVisualTransformer(BaseModel):
'''
INPUT_RESOLUTION: 224
PATCH_SIZE: 32
WIDTH: 768
OUTPUT_DIM: 512
LAYERS: 12
HEADS: 12
'''
para_dict = {}
para_dict.update(MULTI_HEAD_VIT_MODEL.para_dict)
def __init__(self, cfg, logger=None):
super().__init__(cfg, logger=logger)
self.visual = MULTI_HEAD_VIT_MODEL(cfg)
self.multi_head = cfg.MULTI_HEAD
self.frozen_layers = cfg.FROZEN_LAYERS
self.ft_layers = cfg.FT_LAYERS
def forward(self, x):
out = self.visual.forward(x)
return out
@staticmethod
def get_config_template():
'''
{ "ENV" :
{ "description" : "",
"A" : {
"value": 1.0,
"description": ""
}
}
}
:return:
'''
return dict_to_yaml('BACKBONES',
__class__.__name__,
MultiHeadSomeFTVisualTransformer.para_dict,
set_name=True)
@BACKBONES.register_class()
class SomeFTVisualTransformerTwoPart(BaseModel):
'''
INPUT_RESOLUTION: 224
PATCH_SIZE: 32
WIDTH: 768
OUTPUT_DIM: 512
LAYERS: 12
HEADS: 12
'''
para_dict = {} | para_dict.update(MULTI_HEAD_VIT_MODEL_Split.para_dict) | 3 | 2023-12-21 02:01:48+00:00 | 12k |
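The reformat_state_dict method in the record above splits a pretrained ViT checkpoint whose blocks live under transformer.resblocks.N into a frozen stack and a re-indexed finetune stack. A standalone sketch of that key remapping (hypothetical helper name; frozen_layers=6 mirrors the FROZEN_LAYERS default shown in the configs above):

def remap_vit_keys(state_dict: dict, frozen_layers: int = 6) -> dict:
    # Blocks below `frozen_layers` keep their index under frozen_transformer.*;
    # the remaining blocks are shifted down and moved under ft_transformer.*.
    out = {}
    for k, v in state_dict.items():
        if 'transformer.resblocks.' in k:
            idx = int(k.split('.')[2])
            if idx < frozen_layers:
                out[k.replace('transformer.resblocks', 'frozen_transformer.resblocks')] = v
            else:
                parts = k.replace('transformer.resblocks', 'ft_transformer.resblocks').split('.')
                parts[2] = str(idx - frozen_layers)
                out['.'.join(parts)] = v
        else:
            out[k] = v
    return out

# Example: layer 2 stays at index 2 in the frozen stack, layer 9 becomes
# index 3 (9 - 6) in the finetune stack, and other keys pass through unchanged.
demo = {
    'conv1.weight': 0,
    'transformer.resblocks.2.attn.in_proj_weight': 0,
    'transformer.resblocks.9.mlp.c_fc.weight': 0,
}
print(sorted(remap_vit_keys(demo)))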
pigeonai-org/ViDove | src/task.py | [
{
"identifier": "SrtScript",
"path": "src/srt_util/srt.py",
"snippet": "class SrtScript(object):\n def __init__(self, src_lang, tgt_lang, segments, domain=\"General\") -> None:\n self.domain = domain\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n self.segments = [SrtSegment(self.src_lang, self.tgt_lang, seg) for seg in segments]\n\n if self.domain != \"General\":\n if os.path.exists(f\"{dict_path}/{self.domain}\") and\\\n os.path.exists(f\"{dict_path}/{self.domain}/{src_lang}.csv\") and os.path.exists(f\"{dict_path}/{self.domain}/{tgt_lang}.csv\" ):\n # TODO: load dictionary\n self.dict = dict_util.term_dict(f\"{dict_path}/{self.domain}\", src_lang, tgt_lang)\n ...\n else:\n logging.error(f\"domain {self.domain} or related dictionary({src_lang} or {tgt_lang}) doesn't exist, fallback to general domain, this will disable correct_with_force_term and spell_check_term\")\n self.domain = \"General\"\n\n\n @classmethod\n def parse_from_srt_file(cls, src_lang, tgt_lang, domain, path = None, srt_str = None):\n if path is not None:\n with open(path, 'r', encoding=\"utf-8\") as f:\n script_lines = [line.rstrip() for line in f.readlines()]\n elif srt_str is not None:\n script_lines = srt_str.splitlines()\n else:\n raise RuntimeError(\"need input Srt Path or Srt String\")\n\n bilingual = False\n if script_lines[2] != '' and script_lines[3] != '':\n bilingual = True\n segments = []\n if bilingual:\n for i in range(0, len(script_lines), 5):\n segments.append(list(script_lines[i:i + 5]))\n else:\n for i in range(0, len(script_lines), 4):\n segments.append(list(script_lines[i:i + 4]))\n return cls(src_lang, tgt_lang, segments, domain)\n\n def merge_segs(self, idx_list) -> SrtSegment:\n \"\"\"\n Merge entire segment list to a single segment\n :param idx_list: List of index to merge\n :return: Merged list\n \"\"\"\n if not idx_list:\n raise NotImplementedError('Empty idx_list')\n seg_result = deepcopy(self.segments[idx_list[0]])\n if len(idx_list) == 1:\n return seg_result\n\n for idx in range(1, len(idx_list)):\n seg_result += self.segments[idx_list[idx]]\n\n return seg_result\n\n def form_whole_sentence(self):\n \"\"\"\n Concatenate or Strip sentences and reconstruct segments list. This is because of\n improper segmentation from openai-whisper.\n :return: None\n \"\"\"\n logging.info(\"Forming whole sentences...\")\n merge_list = [] # a list of indices that should be merged e.g. [[0], [1, 2, 3, 4], [5, 6], [7]]\n sentence = []\n ending_puncs = punctuation_dict[self.src_lang][\"sentence_end\"]\n # Get each entire sentence of distinct segments, fill indices to merge_list\n for i, seg in enumerate(self.segments):\n if seg.source_text[-1] in ending_puncs and len(seg.source_text) > 10 and 'vs.' 
not in seg.source_text:\n sentence.append(i)\n merge_list.append(sentence)\n sentence = []\n else:\n sentence.append(i)\n\n # Reconstruct segments, each with an entire sentence\n segments = []\n for idx_list in merge_list:\n if len(idx_list) > 1:\n logging.info(\"merging segments: %s\", idx_list)\n segments.append(self.merge_segs(idx_list))\n\n self.segments = segments\n\n def remove_trans_punctuation(self):\n \"\"\"\n Post-process: remove all punc after translation and split\n :return: None\n \"\"\"\n for i, seg in enumerate(self.segments):\n seg.remove_trans_punc()\n logging.info(\"Removed punctuation in translation.\")\n\n def set_translation(self, translate: str, id_range: tuple, model, video_name, video_link=None):\n start_seg_id = id_range[0]\n end_seg_id = id_range[1]\n\n src_text = \"\"\n for i, seg in enumerate(self.segments[start_seg_id - 1:end_seg_id]):\n src_text += seg.source_text\n src_text += '\\n\\n'\n\n def inner_func(target, input_str):\n # handling merge sentences issue.\n response = openai.ChatCompletion.create(\n model=\"gpt-4\",\n messages=[\n {\"role\": \"system\",\n \"content\": \"Your task is to merge or split sentences into a specified number of lines as required. You need to ensure the meaning of the sentences as much as possible, but when necessary, a sentence can be divided into two lines for output\"},\n {\"role\": \"system\", \"content\": \"Note: You only need to output the processed {} sentences. If you need to output a sequence number, please separate it with a colon.\".format(self.tgt_lang)},\n {\"role\": \"user\", \"content\": 'Please split or combine the following sentences into {} sentences:\\n{}'.format(target, input_str)}\n ],\n temperature=0.15\n )\n return response['choices'][0]['message']['content'].strip()\n\n # handling merge sentences issue.\n lines = translate.split('\\n\\n')\n if len(lines) < (end_seg_id - start_seg_id + 1):\n count = 0\n solved = True\n while count < 5 and len(lines) != (end_seg_id - start_seg_id + 1):\n count += 1\n print(\"Solving Unmatched Lines|iteration {}\".format(count))\n logging.error(\"Solving Unmatched Lines|iteration {}\".format(count))\n\n flag = True\n while flag:\n flag = False\n try:\n translate = inner_func(end_seg_id - start_seg_id + 1, translate)\n except Exception as e:\n print(\"An error has occurred during solving unmatched lines:\", e)\n print(\"Retrying...\")\n logging.error(\"An error has occurred during solving unmatched lines:\", e)\n logging.error(\"Retrying...\")\n flag = True\n lines = translate.split('\\n')\n\n if len(lines) < (end_seg_id - start_seg_id + 1):\n solved = False\n print(\"Failed Solving unmatched lines, Manually parse needed\")\n logging.error(\"Failed Solving unmatched lines, Manually parse needed\")\n\n # FIXME: put the error log in our log file\n if not os.path.exists(\"./logs\"):\n os.mkdir(\"./logs\")\n if video_link:\n log_file = \"./logs/log_link.csv\"\n log_exist = os.path.exists(log_file)\n with open(log_file, \"a\") as log:\n if not log_exist:\n log.write(\"range_of_text,iterations_solving,solved,file_length,video_link\" + \"\\n\")\n log.write(str(id_range) + ',' + str(count) + ',' + str(solved) + ',' + str(\n len(self.segments)) + ',' + video_link + \"\\n\")\n else:\n log_file = \"./logs/log_name.csv\"\n log_exist = os.path.exists(log_file)\n with open(log_file, \"a\") as log:\n if not log_exist:\n log.write(\"range_of_text,iterations_solving,solved,file_length,video_name\" + \"\\n\")\n log.write(str(id_range) + ',' + str(count) + ',' + str(solved) + ',' + str(\n 
len(self.segments)) + ',' + video_name + \"\\n\")\n # print(lines)\n\n for i, seg in enumerate(self.segments[start_seg_id - 1:end_seg_id]):\n # naive way to due with merge translation problem\n # TODO: need a smarter solution\n\n if i < len(lines):\n if \"Note:\" in lines[i]: # to avoid note\n lines.remove(lines[i])\n max_num -= 1\n if i == len(lines) - 1:\n break\n if lines[i][0] in [' ', '\\n']:\n lines[i] = lines[i][1:]\n seg.translation = lines[i]\n\n def split_seg(self, seg, text_threshold, time_threshold):\n # evenly split seg to 2 parts and add new seg into self.segments\n # ignore the initial comma to solve the recursion problem\n src_comma_str = punctuation_dict[self.src_lang][\"comma\"]\n tgt_comma_str = punctuation_dict[self.tgt_lang][\"comma\"]\n\n if len(seg.source_text) > 2:\n if seg.source_text[:2] == src_comma_str:\n seg.source_text = seg.source_text[2:]\n if seg.translation[0] == tgt_comma_str:\n seg.translation = seg.translation[1:]\n\n source_text = seg.source_text\n translation = seg.translation\n\n # split the text based on commas\n src_commas = [m.start() for m in re.finditer(src_comma_str, source_text)]\n trans_commas = [m.start() for m in re.finditer(tgt_comma_str, translation)]\n if len(src_commas) != 0:\n src_split_idx = src_commas[len(src_commas) // 2] if len(src_commas) % 2 == 1 else src_commas[\n len(src_commas) // 2 - 1]\n else:\n # split the text based on spaces\n src_space = [m.start() for m in re.finditer(' ', source_text)]\n if len(src_space) > 0:\n src_split_idx = src_space[len(src_space) // 2] if len(src_space) % 2 == 1 else src_space[\n len(src_space) // 2 - 1]\n else:\n src_split_idx = 0\n\n if len(trans_commas) != 0:\n trans_split_idx = trans_commas[len(trans_commas) // 2] if len(trans_commas) % 2 == 1 else trans_commas[\n len(trans_commas) // 2 - 1]\n else:\n trans_split_idx = len(translation) // 2\n\n # to avoid split English word\n for i in range(trans_split_idx, len(translation)):\n if not translation[i].encode('utf-8').isalpha():\n trans_split_idx = i\n break\n\n # split the time duration based on text length\n time_split_ratio = trans_split_idx / (len(seg.translation) - 1)\n\n src_seg1 = source_text[:src_split_idx]\n src_seg2 = source_text[src_split_idx:]\n trans_seg1 = translation[:trans_split_idx]\n trans_seg2 = translation[trans_split_idx:]\n\n start_seg1 = seg.start\n end_seg1 = start_seg2 = seg.start + (seg.end - seg.start) * time_split_ratio\n end_seg2 = seg.end\n\n seg1_dict = {}\n seg1_dict['text'] = src_seg1\n seg1_dict['start'] = start_seg1\n seg1_dict['end'] = end_seg1\n seg1 = SrtSegment(self.src_lang, self.tgt_lang, seg1_dict)\n seg1.translation = trans_seg1\n\n seg2_dict = {}\n seg2_dict['text'] = src_seg2\n seg2_dict['start'] = start_seg2\n seg2_dict['end'] = end_seg2\n seg2 = SrtSegment(self.src_lang, self.tgt_lang, seg2_dict)\n seg2.translation = trans_seg2\n\n result_list = []\n if len(seg1.translation) > text_threshold and (seg1.end - seg1.start) > time_threshold:\n result_list += self.split_seg(seg1, text_threshold, time_threshold)\n else:\n result_list.append(seg1)\n\n if len(seg2.translation) > text_threshold and (seg2.end - seg2.start) > time_threshold:\n result_list += self.split_seg(seg2, text_threshold, time_threshold)\n else:\n result_list.append(seg2)\n\n return result_list\n\n def check_len_and_split(self, text_threshold=30, time_threshold=1.0):\n # if sentence length >= threshold and sentence duration > time_threshold, split this segments to two\n logging.info(\"performing check_len_and_split\")\n segments = []\n 
for i, seg in enumerate(self.segments):\n if len(seg.translation) > text_threshold and (seg.end - seg.start) > time_threshold:\n seg_list = self.split_seg(seg, text_threshold, time_threshold)\n logging.info(\"splitting segment {} in to {} parts\".format(i + 1, len(seg_list)))\n segments += seg_list\n else:\n segments.append(seg)\n\n self.segments = segments\n logging.info(\"check_len_and_split finished\")\n\n def check_len_and_split_range(self, range, text_threshold=30, time_threshold=1.0):\n # DEPRECATED\n # if sentence length >= text_threshold, split this segments to two\n start_seg_id = range[0]\n end_seg_id = range[1]\n extra_len = 0\n segments = []\n for i, seg in enumerate(self.segments[start_seg_id - 1:end_seg_id]):\n if len(seg.translation) > text_threshold and (seg.end - seg.start) > time_threshold:\n seg_list = self.split_seg(seg, text_threshold, time_threshold)\n segments += seg_list\n extra_len += len(seg_list) - 1\n else:\n segments.append(seg)\n\n self.segments[start_seg_id - 1:end_seg_id] = segments\n return extra_len\n\n def correct_with_force_term(self):\n ## force term correction\n logging.info(\"performing force term correction\")\n\n # check domain\n if self.domain == \"General\":\n logging.info(\"General domain could not perform correct_with_force_term. skip this step.\")\n pass\n else:\n keywords = list(self.dict.keys())\n keywords.sort(key=lambda x: len(x), reverse=True)\n\n for word in keywords:\n for i, seg in enumerate(self.segments):\n if word in seg.source_text.lower():\n seg.source_text = re.sub(fr\"({word}es|{word}s?)\\b\", \"{}\".format(self.dict.get(word)),\n seg.source_text, flags=re.IGNORECASE)\n logging.info(\n \"replace term: \" + word + \" --> \" + self.dict.get(word) + \" in time stamp {}\".format(\n i + 1))\n logging.info(\"source text becomes: \" + seg.source_text)\n\n\n def fetchfunc(self, word, threshold):\n import enchant\n result = word\n distance = 0\n threshold = threshold * len(word)\n temp = \"\"\n for matched in self.dict:\n if (\" \" in matched and \" \" in word) or (\" \" not in matched and \" \" not in word):\n if enchant.utils.levenshtein(word, matched) < enchant.utils.levenshtein(word, temp):\n temp = matched\n if enchant.utils.levenshtein(word, temp) < threshold:\n distance = enchant.utils.levenshtein(word, temp)\n result = temp\n return distance, result\n\n def extract_words(self, sentence, n):\n # this function split the sentence to chunks by n of words\n # e.g. sentence: \"this, is a sentence\", n = 2\n # result: [\"this,\", \"is\", \"a\", [\"sentence\"], [\"this,\", \"is\"], \"is a\", \"a sentence\"]\n words = sentence.split()\n res = []\n for j in range(n, 0, -1):\n res += [words[i:i + j] for i in range(len(words) - j + 1)]\n return res\n\n def spell_check_term(self):\n logging.info(\"performing spell check\")\n\n # check domain\n if self.domain == \"General\":\n logging.info(\"General domain could not perform spell_check_term. 
skip this step.\")\n pass\n\n import enchant\n dict = enchant.Dict('en_US')\n\n for seg in tqdm(self.segments):\n ready_words = self.extract_words(seg.source_text, 2)\n for i in range(len(ready_words)):\n word_list = ready_words[i]\n word, real_word, pos = self.get_real_word(word_list)\n if not dict.check(real_word) and (real_word not in self.dict.keys()):\n distance, correct_term = self.fetchfunc(real_word, 0.3)\n if distance != 0:\n seg.source_text = re.sub(word[:pos], correct_term, seg.source_text, flags=re.IGNORECASE)\n logging.info(\n \"replace: \" + word[:pos] + \" to \" + correct_term + \"\\t distance = \" + str(distance))\n\n def get_real_word(self, word_list: list):\n word = \"\"\n for w in word_list:\n word += f\"{w} \"\n word = word[:-1] # \"this, is\"\n if word[-2:] == \".\\n\":\n real_word = word[:-2].lower()\n n = -2\n elif word[-1:] in [\".\", \"\\n\", \",\", \"!\", \"?\"]:\n real_word = word[:-1].lower()\n n = -1\n else:\n real_word = word.lower()\n n = 0\n return word, real_word, len(word) + n\n\n ## WRITE AND READ FUNCTIONS ##\n\n def get_source_only(self):\n # return a string with pure source text\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{seg.source_text}\\n\\n\\n' # f'SENTENCE {i+1}: {seg.source_text}\\n\\n\\n'\n\n return result\n\n def reform_src_str(self):\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{i + 1}\\n'\n result += str(seg)\n return result\n\n def reform_trans_str(self):\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{i + 1}\\n'\n result += seg.get_trans_str()\n return result\n\n def form_bilingual_str(self):\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{i + 1}\\n'\n result += seg.get_bilingual_str()\n return result\n\n def write_srt_file_src(self, path: str):\n # write srt file to path\n with open(path, \"w\", encoding='utf-8') as f:\n f.write(self.reform_src_str())\n pass\n\n def write_srt_file_translate(self, path: str):\n logging.info(\"writing to \" + path)\n with open(path, \"w\", encoding='utf-8') as f:\n f.write(self.reform_trans_str())\n pass\n\n def write_srt_file_bilingual(self, path: str):\n logging.info(\"writing to \" + path)\n with open(path, \"w\", encoding='utf-8') as f:\n f.write(self.form_bilingual_str())\n pass\n\n def realtime_write_srt(self, path, range, length, idx):\n # DEPRECATED\n start_seg_id = range[0]\n end_seg_id = range[1]\n with open(path, \"a\", encoding='utf-8') as f:\n # for i, seg in enumerate(self.segments[start_seg_id-1:end_seg_id+length]):\n # f.write(f'{i+idx}\\n')\n # f.write(seg.get_trans_str())\n for i, seg in enumerate(self.segments):\n if i < range[0] - 1: continue\n if i >= range[1] + length: break\n f.write(f'{i + idx}\\n')\n f.write(seg.get_trans_str())\n pass\n\n def realtime_bilingual_write_srt(self, path, range, length, idx):\n # DEPRECATED\n start_seg_id = range[0]\n end_seg_id = range[1]\n with open(path, \"a\", encoding='utf-8') as f:\n for i, seg in enumerate(self.segments):\n if i < range[0] - 1: continue\n if i >= range[1] + length: break\n f.write(f'{i + idx}\\n')\n f.write(seg.get_bilingual_str())\n pass"
},
{
"identifier": "srt2ass",
"path": "src/srt_util/srt2ass.py",
"snippet": "def srt2ass(input_file, sub_style, is_split, split_method):\n if '.ass' in input_file:\n return input_file\n\n if not os.path.isfile(input_file):\n print(input_file + ' not exist')\n return\n\n src = fileopen(input_file)\n srt_content = src[0]\n # encoding = src[1] # Will not encode so do not need to pass codec para\n src = ''\n utf8bom = ''\n\n if u'\\ufeff' in srt_content:\n srt_content = srt_content.replace(u'\\ufeff', '')\n utf8bom = u'\\ufeff'\n \n srt_content = srt_content.replace(\"\\r\", \"\")\n lines = [x.strip() for x in srt_content.split(\"\\n\") if x.strip()]\n subLines = ''\n dlgLines = '' # dialogue line\n lineCount = 0\n output_file = '.'.join(input_file.split('.')[:-1])\n output_file += '.ass'\n\n for ln in range(len(lines)):\n line = lines[ln]\n if line.isdigit() and re.match('-?\\d\\d:\\d\\d:\\d\\d', lines[(ln+1)]):\n if dlgLines:\n subLines += dlgLines + \"\\n\"\n dlgLines = ''\n lineCount = 0\n continue\n else:\n if re.match('-?\\d\\d:\\d\\d:\\d\\d', line):\n line = line.replace('-0', '0')\n if sub_style =='default':\n dlgLines += 'Dialogue: 0,' + line + ',default,,0,0,0,,'\n elif sub_style =='ikedaCN':\n dlgLines += 'Dialogue: 0,' + line + ',池田字幕1080p,,0,0,0,,'\n elif sub_style == 'sugawaraCN':\n dlgLines += 'Dialogue: 0,' + line + ',中字 1080P,,0,0,0,,'\n elif sub_style == 'kaedeCN':\n dlgLines += 'Dialogue: 0,' + line + ',den SR红色,,0,0,0,,'\n elif sub_style == 'taniguchiCN':\n dlgLines += 'Dialogue: 0,' + line + ',正文_1080P,,0,0,0,,'\n elif sub_style == 'asukaCN':\n dlgLines += 'Dialogue: 0,' + line + ',DEFAULT1,,0,0,0,,'\n elif sub_style == 'starPigeon':\n dlgLines += 'Dialogue: 0,' + line + ',Starcraft 2 下(一般字幕),,0,0,0,,'\n else:\n if lineCount < 2:\n dlg_string = line\n if is_split == \"Yes\" and split_method == 'Modest':\n # do not split if space proceed and followed by non-ASC-II characters\n # do not split if space followed by word that less than 5 characters\n split_string = re.sub(r'(?<=[^\\x00-\\x7F])\\s+(?=[^\\x00-\\x7F])(?=\\w{5})', r'|', dlg_string)\n # print(split_string)\n if len(split_string.split('|')) > 1:\n dlgLines += (split_string.replace('|', \"(adjust_required)\\n\" + dlgLines)) + \"(adjust_required)\"\n else:\n dlgLines += line\n elif is_split == \"Yes\" and split_method == 'Aggressive':\n # do not split if space proceed and followed by non-ASC-II characters\n # split at all the rest spaces\n split_string = re.sub(r'(?<=[^\\x00-\\x7F])\\s+(?=[^\\x00-\\x7F])', r'|', dlg_string)\n if len(split_string.split('|')) > 1:\n dlgLines += (split_string.replace('|',\"(adjust_required)\\n\" + dlgLines)) + \"(adjust_required)\"\n else:\n dlgLines += line\n else:\n dlgLines += line\n else:\n dlgLines += \"\\n\" + line\n lineCount += 1\n ln += 1\n\n\n subLines += dlgLines + \"\\n\"\n\n subLines = re.sub(r'\\d(\\d:\\d{2}:\\d{2}),(\\d{2})\\d', '\\\\1.\\\\2', subLines)\n subLines = re.sub(r'\\s+-->\\s+', ',', subLines)\n # replace style\n # subLines = re.sub(r'<([ubi])>', \"{\\\\\\\\\\g<1>1}\", subLines)\n # subLines = re.sub(r'</([ubi])>', \"{\\\\\\\\\\g<1>0}\", subLines)\n # subLines = re.sub(r'<font\\s+color=\"?#(\\w{2})(\\w{2})(\\w{2})\"?>', \"{\\\\\\\\c&H\\\\3\\\\2\\\\1&}\", subLines)\n # subLines = re.sub(r'</font>', \"\", subLines)\n\n if sub_style == 'default':\n head_name = 'head_str_default'\n elif sub_style == 'ikedaCN':\n head_name = 'head_str_ikeda'\n elif sub_style == 'sugawaraCN':\n head_name = 'head_str_sugawara'\n elif sub_style == 'kaedeCN':\n head_name = 'head_str_kaede'\n elif sub_style == \"taniguchiCN\":\n head_name = 
'head_str_taniguchi'\n elif sub_style == 'asukaCN':\n head_name = 'head_str_asuka'\n elif sub_style == 'starPigeon':\n head_name = 'head_str_pigeon'\n\n head_str = STYLE_DICT.get(head_name)\n output_str = utf8bom + head_str + '\\n' + subLines\n # encode again for head string\n output_str = output_str.encode('utf8')\n\n with open(output_file, 'wb') as output:\n output.write(output_str)\n\n output_file = output_file.replace('\\\\', '\\\\\\\\')\n output_file = output_file.replace('/', '//')\n return output_file"
},
{
"identifier": "get_translation",
"path": "src/translators/translation.py",
"snippet": "def get_translation(srt, model, video_name, prompt = None, chunk_size = 1000):\n # print(srt.get_source_only())\n script_arr, range_arr = split_script(srt.get_source_only(),chunk_size)\n translate(srt, script_arr, range_arr, model, video_name, task=prompt)\n pass"
},
{
"identifier": "prompt_selector",
"path": "src/translators/translation.py",
"snippet": "def prompt_selector(src_lang, tgt_lang, domain):\n language_map = {\n \"EN\": \"English\",\n \"ZH\": \"Chinese\",\n \"ES\": \"Spanish\",\n \"FR\": \"France\",\n \"DE\": \"Germany\",\n \"RU\": \"Russian\",\n \"JA\": \"Japanese\",\n \"AR\": \"Arabic\",\n }\n try:\n src_lang = language_map[src_lang]\n tgt_lang = language_map[tgt_lang]\n except:\n print(\"Unsupported language, is your abbreviation correct?\")\n logging.info(\"Unsupported language detected\")\n prompt = f\"\"\"\n you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang}, \n you will be provided with a segement in {src_lang} parsed by line, where your translation text should keep the original \n meaning and the number of lines.\n \"\"\"\n return prompt"
}
] | import threading
import time
import openai
import logging
import subprocess
import torch
import stable_whisper
import shutil
from pytube import YouTube
from os import getenv, getcwd
from pathlib import Path
from enum import Enum, auto
from src.srt_util.srt import SrtScript
from src.srt_util.srt2ass import srt2ass
from time import time, strftime, gmtime, sleep
from src.translators.translation import get_translation, prompt_selector
from datetime import datetime | 8,219 | def __init__(self, task_id, task_local_dir, task_cfg):
"""
Constructor for initializing a task with its ID, local directory, and configuration settings.
"""
self.__status_lock = threading.Lock()
self.__status = TaskStatus.CREATED
self.gpu_status = 0
openai.api_key = getenv("OPENAI_API_KEY")
self.task_id = task_id
self.task_local_dir = task_local_dir
self.ASR_setting = task_cfg["ASR"]
self.translation_setting = task_cfg["translation"]
self.translation_model = self.translation_setting["model"]
self.output_type = task_cfg["output_type"]
self.target_lang = task_cfg["target_lang"]
self.source_lang = task_cfg["source_lang"]
self.field = task_cfg["field"]
self.pre_setting = task_cfg["pre_process"]
self.post_setting = task_cfg["post_process"]
self.audio_path = None
self.SRT_Script = None
self.result = None
self.s_t = None
self.t_e = None
self.t_s = time()
# logging setting
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt, handlers=[
logging.FileHandler(
"{}/{}_{}.log".format(task_local_dir, f"task_{task_id}", datetime.now().strftime("%m%d%Y_%H%M%S")),
'w', encoding='utf-8')])
print(f"Task ID: {self.task_id}")
logging.info(f"Task ID: {self.task_id}")
logging.info(f"{self.source_lang} -> {self.target_lang} task in {self.field}")
logging.info(f"Translation Model: {self.translation_model}")
logging.info(f"subtitle_type: {self.output_type['subtitle']}")
logging.info(f"video_ouput: {self.output_type['video']}")
logging.info(f"bilingual_ouput: {self.output_type['bilingual']}")
logging.info("Pre-process setting:")
for key in self.pre_setting:
logging.info(f"{key}: {self.pre_setting[key]}")
logging.info("Post-process setting:")
for key in self.post_setting:
logging.info(f"{key}: {self.post_setting[key]}")
@staticmethod
def fromYoutubeLink(youtube_url, task_id, task_dir, task_cfg):
"""
Creates a YoutubeTask instance from a YouTube URL.
"""
return YoutubeTask(task_id, task_dir, task_cfg, youtube_url)
@staticmethod
def fromAudioFile(audio_path, task_id, task_dir, task_cfg):
"""
Creates an AudioTask instance from an audio file path.
"""
return AudioTask(task_id, task_dir, task_cfg, audio_path)
@staticmethod
def fromVideoFile(video_path, task_id, task_dir, task_cfg):
"""
Creates a VideoTask instance from a video file path.
"""
return VideoTask(task_id, task_dir, task_cfg, video_path)
@staticmethod
def fromSRTFile(srt_path, task_id, task_dir, task_cfg):
"""
        Creates an SRTTask instance from an SRT file path.
"""
return SRTTask(task_id, task_dir, task_cfg, srt_path)
# Module 1 ASR: audio --> SRT_script
def get_srt_class(self):
"""
Handles the ASR module to convert audio to SRT script format.
"""
# Instead of using the script_en variable directly, we'll use script_input
# TODO: setup ASR module like translator
self.status = TaskStatus.INITIALIZING_ASR
        if self.SRT_Script is not None:
logging.info("SRT input mode, skip ASR Module")
return
method = self.ASR_setting["whisper_config"]["method"]
whisper_model = self.ASR_setting["whisper_config"]["whisper_model"]
src_srt_path = self.task_local_dir.joinpath(f"task_{self.task_id}_{self.source_lang}.srt")
if not Path.exists(src_srt_path):
# extract script from audio
logging.info("extract script from audio")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info(f"Module 1: ASR inference method: {method}")
init_prompt = "Hello, welcome to my lecture." if self.source_lang == "EN" else ""
if method == "api":
with open(self.audio_path, 'rb') as audio_file:
transcript = openai.Audio.transcribe(model="whisper-1", file=audio_file, response_format="srt", language=self.source_lang.lower(), prompt=init_prompt)
elif method == "stable":
model = stable_whisper.load_model(whisper_model, device)
transcript = model.transcribe(str(self.audio_path), regroup=False,
initial_prompt=init_prompt)
(
transcript
.split_by_punctuation(['.', '。', '?'])
.merge_by_gap(.15, max_words=3)
.merge_by_punctuation([' '])
.split_by_punctuation(['.', '。', '?'])
)
transcript = transcript.to_dict()
transcript = transcript['segments']
                # after getting the transcript, release the GPU resource
torch.cuda.empty_cache()
else:
raise RuntimeError(f"unavaliable ASR inference method: {method}")
if isinstance(transcript, str):
|
class TaskStatus(str, Enum):
"""
An enumeration class representing the different statuses a task can have in the translation pipeline.
TODO: add translation progress indicator (%).
"""
CREATED = 'CREATED'
INITIALIZING_ASR = 'INITIALIZING_ASR'
PRE_PROCESSING = 'PRE_PROCESSING'
TRANSLATING = 'TRANSLATING'
POST_PROCESSING = 'POST_PROCESSING'
OUTPUT_MODULE = 'OUTPUT_MODULE'
class Task:
"""
A class representing a task in the translation pipeline. It includes methods for handling different stages of the task.
    If one wants to add a new entry type (e.g. add support for different video formats),
one should extend this class and override the `run` method.
"""
@property
def status(self):
with self.__status_lock:
return self.__status
@status.setter
def status(self, new_status):
"""
Sets the new status of the task, ensuring thread safety with a lock.
"""
with self.__status_lock:
self.__status = new_status
def __init__(self, task_id, task_local_dir, task_cfg):
"""
Constructor for initializing a task with its ID, local directory, and configuration settings.
"""
self.__status_lock = threading.Lock()
self.__status = TaskStatus.CREATED
self.gpu_status = 0
openai.api_key = getenv("OPENAI_API_KEY")
self.task_id = task_id
self.task_local_dir = task_local_dir
self.ASR_setting = task_cfg["ASR"]
self.translation_setting = task_cfg["translation"]
self.translation_model = self.translation_setting["model"]
self.output_type = task_cfg["output_type"]
self.target_lang = task_cfg["target_lang"]
self.source_lang = task_cfg["source_lang"]
self.field = task_cfg["field"]
self.pre_setting = task_cfg["pre_process"]
self.post_setting = task_cfg["post_process"]
self.audio_path = None
self.SRT_Script = None
self.result = None
self.s_t = None
self.t_e = None
self.t_s = time()
# logging setting
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt, handlers=[
logging.FileHandler(
"{}/{}_{}.log".format(task_local_dir, f"task_{task_id}", datetime.now().strftime("%m%d%Y_%H%M%S")),
'w', encoding='utf-8')])
print(f"Task ID: {self.task_id}")
logging.info(f"Task ID: {self.task_id}")
logging.info(f"{self.source_lang} -> {self.target_lang} task in {self.field}")
logging.info(f"Translation Model: {self.translation_model}")
logging.info(f"subtitle_type: {self.output_type['subtitle']}")
logging.info(f"video_ouput: {self.output_type['video']}")
logging.info(f"bilingual_ouput: {self.output_type['bilingual']}")
logging.info("Pre-process setting:")
for key in self.pre_setting:
logging.info(f"{key}: {self.pre_setting[key]}")
logging.info("Post-process setting:")
for key in self.post_setting:
logging.info(f"{key}: {self.post_setting[key]}")
@staticmethod
def fromYoutubeLink(youtube_url, task_id, task_dir, task_cfg):
"""
Creates a YoutubeTask instance from a YouTube URL.
"""
return YoutubeTask(task_id, task_dir, task_cfg, youtube_url)
@staticmethod
def fromAudioFile(audio_path, task_id, task_dir, task_cfg):
"""
Creates an AudioTask instance from an audio file path.
"""
return AudioTask(task_id, task_dir, task_cfg, audio_path)
@staticmethod
def fromVideoFile(video_path, task_id, task_dir, task_cfg):
"""
Creates a VideoTask instance from a video file path.
"""
return VideoTask(task_id, task_dir, task_cfg, video_path)
@staticmethod
def fromSRTFile(srt_path, task_id, task_dir, task_cfg):
"""
        Creates an SRTTask instance from an SRT file path.
"""
return SRTTask(task_id, task_dir, task_cfg, srt_path)
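    # Illustrative calls to the factory constructors above (the URL, paths, and IDs here are
    # hypothetical, not taken from the repository):
    #   task = Task.fromYoutubeLink("https://youtu.be/<video_id>", task_id, task_dir, task_cfg)
    #   task = Task.fromVideoFile("/path/to/lecture.mp4", task_id, task_dir, task_cfg)
    #   task = Task.fromSRTFile("/path/to/source.srt", task_id, task_dir, task_cfg)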
# Module 1 ASR: audio --> SRT_script
def get_srt_class(self):
"""
Handles the ASR module to convert audio to SRT script format.
"""
# Instead of using the script_en variable directly, we'll use script_input
# TODO: setup ASR module like translator
self.status = TaskStatus.INITIALIZING_ASR
        if self.SRT_Script is not None:
logging.info("SRT input mode, skip ASR Module")
return
method = self.ASR_setting["whisper_config"]["method"]
whisper_model = self.ASR_setting["whisper_config"]["whisper_model"]
src_srt_path = self.task_local_dir.joinpath(f"task_{self.task_id}_{self.source_lang}.srt")
if not Path.exists(src_srt_path):
# extract script from audio
logging.info("extract script from audio")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info(f"Module 1: ASR inference method: {method}")
init_prompt = "Hello, welcome to my lecture." if self.source_lang == "EN" else ""
if method == "api":
with open(self.audio_path, 'rb') as audio_file:
transcript = openai.Audio.transcribe(model="whisper-1", file=audio_file, response_format="srt", language=self.source_lang.lower(), prompt=init_prompt)
elif method == "stable":
model = stable_whisper.load_model(whisper_model, device)
transcript = model.transcribe(str(self.audio_path), regroup=False,
initial_prompt=init_prompt)
(
transcript
.split_by_punctuation(['.', '。', '?'])
.merge_by_gap(.15, max_words=3)
.merge_by_punctuation([' '])
.split_by_punctuation(['.', '。', '?'])
)
transcript = transcript.to_dict()
transcript = transcript['segments']
                # after getting the transcript, release the GPU resource
torch.cuda.empty_cache()
else:
raise RuntimeError(f"unavaliable ASR inference method: {method}")
if isinstance(transcript, str): | self.SRT_Script = SrtScript.parse_from_srt_file(self.source_lang, self.target_lang, domain = self.field, srt_str = transcript.rstrip()) | 0 | 2023-12-20 01:46:47+00:00 | 12k |
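The Task excerpt above guards its status field with a per-instance lock so that the getter and setter are thread-safe. Below is a minimal standalone sketch of that property pattern, assuming nothing beyond the Python standard library; the StatusHolder name and the trimmed enum are illustrative, not part of the repository.

import threading
from enum import Enum

class TaskStatus(str, Enum):
    CREATED = 'CREATED'
    TRANSLATING = 'TRANSLATING'

class StatusHolder:
    def __init__(self):
        self.__status_lock = threading.Lock()  # one lock per instance, as in Task
        self.__status = TaskStatus.CREATED

    @property
    def status(self):
        with self.__status_lock:  # reads acquire the lock
            return self.__status

    @status.setter
    def status(self, new_status):
        with self.__status_lock:  # writes acquire the same lock
            self.__status = new_status

holder = StatusHolder()
holder.status = TaskStatus.TRANSLATING
print(holder.status)  # TaskStatus.TRANSLATING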
YyzHarry/shortcut-ood-fairness | train.py | [
{
"identifier": "datasets",
"path": "dataset/datasets.py",
"snippet": "DATASETS = [\n 'MIMIC',\n 'CheXpert',\n 'NIH',\n 'PadChest',\n 'VinDr',\n 'SIIM',\n 'ISIC',\n 'ODIR'\n]\nCXR_DATASETS = [\n 'MIMIC',\n 'CheXpert',\n 'NIH',\n 'PadChest',\n 'VinDr',\n 'SIIM'\n]\nATTRS = ['sex', 'ethnicity', 'age', 'sex_ethnicity']\nTASKS = ['No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',\n 'Cataract', 'Retinopathy']\n N_STEPS = 5001 # Default, subclasses may override\n CHECKPOINT_FREQ = 100 # Default, subclasses may override\n N_WORKERS = 8 # Default, subclasses may override\n INPUT_SHAPE = None # Subclasses should override\n AVAILABLE_ATTRS = None # Subclasses should override\n SPLITS = { # Default, subclasses may override\n 'tr': 0,\n 'va': 1,\n 'te': 2\n }\n EVAL_SPLITS = ['te'] # Default, subclasses may override\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age', 'ethnicity', 'sex_ethnicity']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age', 'ethnicity', 'sex_ethnicity']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n SPLITS = {\n 'te': 2\n }\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'Pneumothorax'\n ]\n SPLITS = {\n 'te': 2\n }\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Cataract', 'Retinopathy'\n ]\ndef get_dataset_class(dataset_name):\ndef num_environments(dataset_name):\n def __init__(self, root, split, metadata, transform, group_def='group', subsample_type=None, duplicates=None, subset_query=None):\n def _count_groups(self):\n def subsample(self, subsample_type):\n def duplicate(self, duplicates):\n def __getitem__(self, index):\n def __len__(self):\n def __init__(self, metadata, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def transform(self, x):\n def __init__(self, dss):\n def __getitem__(self, idx):\n def __len__(self):\n def __init__(self, ds, idxs):\n def __getitem__(self, 
idx):\n def __len__(self):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\nclass SubpopDataset:\nclass BaseImageDataset(SubpopDataset):\nclass ConcatImageDataset(SubpopDataset):\nclass SubsetImageDataset(SubpopDataset):\nclass MIMIC(BaseImageDataset):\nclass CheXpert(BaseImageDataset):\nclass NIH(BaseImageDataset):\nclass PadChest(BaseImageDataset):\nclass VinDr(BaseImageDataset):\nclass SIIM(BaseImageDataset):\nclass ISIC(BaseImageDataset):\nclass ODIR(BaseImageDataset):"
},
{
"identifier": "algorithms",
"path": "learning/algorithms.py",
"snippet": "ALGORITHMS = [\n 'ERM',\n 'StratifiedERM',\n # subgroup methods\n 'GroupDRO',\n 'IRM',\n 'CVaRDRO',\n 'JTT',\n 'LISA',\n 'DFR',\n # data augmentation\n 'Mixup',\n # domain generalization methods\n 'MMD',\n 'CORAL',\n 'DANN',\n 'CDANN',\n # imbalanced learning methods\n 'ReSample',\n 'ReWeight',\n 'SqrtReWeight',\n 'CBLoss',\n 'Focal',\n 'LDAM',\n 'BSoftmax',\n 'CRT',\n 'ReWeightCRT',\n 'VanillaCRT',\n # flat minima optimizer\n 'MA',\n 'SAM',\n # attribute balancing\n 'GroupDROAttr',\n 'ReSampleAttr',\n 'ReWeightAttr',\n]\n D = self.my_cdist(x, y)\n K = torch.zeros_like(D)\n W = size[2]\n H = size[3]\ndef get_algorithm_class(algorithm_name):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _init_model(self):\n def _compute_loss(self, i, x, y, a, step):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def return_groups(self, y, a):\n def return_attributes(all_a):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _init_model(self):\n def _compute_loss(self, i, x, y, a, step):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None, group_def='group'):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def focal_loss(input_values, gamma):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, 
input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _irm_penalty(logits, y):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,\n grp_sizes=None, attr_sizes=None, gaussian=False):\n def my_cdist(x1, x2):\n def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100, 1000]):\n def mmd(self, x, y):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,\n grp_sizes=None, attr_sizes=None, conditional=False, class_balance=False):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step, stage1_model):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _to_ohe(self, y):\n def _lisa_mixup_data(self, s, a, x, y, alpha):\n def _rand_bbox(size, lam):\n def _mix_up(alpha, x1, x2, y1, y2):\n def _cut_mix_up(self, alpha, x1, x2, y1, y2):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def update(self, minibatch, step):\n def predict(self, x):\n def update_ma(self):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def update(self, minibatch, step):\n def norm(tensor_list, p=2):\nclass Algorithm(torch.nn.Module):\nclass ERM(Algorithm):\nclass GroupDRO(ERM):\nclass GroupDROAttr(ERM):\nclass StratifiedERM(ERM):\nclass ReSample(ERM):\nclass ReSampleAttr(ERM):\nclass ReWeightBase(ERM):\nclass ReWeight(ReWeightBase):\nclass ReWeightAttr(ReWeightBase):\nclass SqrtReWeight(ReWeight):\nclass CBLoss(ReWeight):\nclass Focal(ERM):\nclass LDAM(ERM):\nclass BSoftmax(ERM):\nclass CRT(ERM):\nclass ReWeightCRT(ReWeight):\nclass VanillaCRT(ERM):\nclass DFR(ERM):\nclass IRM(ERM):\nclass Mixup(ERM):\nclass AbstractMMD(ERM):\nclass 
MMD(AbstractMMD):\nclass CORAL(AbstractMMD):\nclass AbstractDANN(Algorithm):\nclass DANN(AbstractDANN):\nclass CDANN(AbstractDANN):\nclass CVaRDRO(ERM):\nclass AbstractTwoStage(Algorithm):\nclass JTT_Stage2(ERM): \nclass JTT(AbstractTwoStage):\nclass LISA(ERM):\nclass MA(ERM):\nclass SAM(ERM):"
},
{
"identifier": "early_stopping",
"path": "learning/early_stopping.py",
"snippet": "class EarlyStopping:\n def __init__(self, patience=5, lower_is_better=True):\n def __call__(self, metric, step, state_dict, path):\ndef save_model(state_dict, path):"
},
{
"identifier": "swad_utils",
"path": "learning/swad_utils.py",
"snippet": "class AveragedModel(Module):\nclass SWADBase:\nclass LossValley(SWADBase):\n def __init__(self, model, device=None, avg_fn=None, rm_optimizer=False):\n def avg_fn(averaged_model_parameter, model_parameter, num_averaged):\n def forward(self, *args, **kwargs):\n def predict(self, *args, **kwargs):\n def network(self):\n def update_parameters(self, model, step=None, start_step=None, end_step=None):\n def clone(self):\n def update_and_evaluate(self, segment_swa, val_loss):\n def get_final_model(self):\n def __init__(self, n_converge, n_tolerance, tolerance_ratio, **kwargs):\n def get_smooth_loss(self, idx):\n def is_converged(self):\n def update_and_evaluate(self, segment_swa, val_loss):\n def get_final_model(self):\n Q = list(self.smooth_Q)[: converge_idx + 1]"
},
{
"identifier": "misc",
"path": "utils/misc.py",
"snippet": "def pickle_save(filename, obj):\ndef pickle_load(filename):\ndef mac_pickle_load(file_path):\ndef mac_pickle_dump(filename, obj):\ndef load_json(json_path):\ndef save_json(json_path, data):\n def default(self, obj):\n def format(cls, text, color='white'):\ndef log(text, color='white', style='normal', with_time=True, handle=None):\ndef print_yellow(text, with_time=True):\ndef print_cyan(text, with_time=True):\ndef print_green(text, with_time=True):\ndef prepare_folders(args):\ndef l2_between_dicts(dict_1, dict_2):\n def __init__(self, ema, oneminusema_correction=True):\n def update(self, dict_data):\ndef count_samples_per_class(targets, num_labels):\ndef make_balanced_weights_per_sample(targets):\ndef pdb():\ndef seed_hash(*args):\ndef print_separator():\ndef print_row(row, colwidth=10, latex=False):\n def format_val(x):\ndef safe_load(parsed):\n def __init__(self, underlying_dataset, keys):\n def __getitem__(self, key):\n def __len__(self):\ndef split_dataset(dataset, n, seed=0):\ndef random_pairs_of_minibatches(minibatches):\ndef mixup_data(x, y, alpha=1., device=\"cpu\"):\ndef accuracy(network, loader, device):\ndef adjust_learning_rate(optimizer, lr, step, total_steps, schedule, cos=False):\n def __init__(self, fname, mode=\"a\"):\n def write(self, message):\n def flush(self):\n def __init__(self, *args, **kwargs):\n def _prototype(self, other, op):\n def __add__(self, other):\n def __rmul__(self, other):\n def __neg__(self):\n def __rsub__(self, other):\n def __truediv__(self, other):\ndef make_grid(tensor, nrow=8, padding=2, normalize=False, ranges=None, scale_each=False, pad_value=0):\n def norm_ip(img, min, max):\n def norm_range(t, ranges):\ndef save_image(tensor, filename, nrow=8, padding=2, normalize=False, ranges=None, scale_each=False, pad_value=0):\nclass NumpyEncoder(json.JSONEncoder):\nclass TextFormat:\nclass MovingAverage:\nclass _SplitDataset(torch.utils.data.Dataset):\nclass Tee:\nclass ParamDict(OrderedDict):"
},
{
"identifier": "eval_helper",
"path": "utils/eval_helper.py",
"snippet": "def predict_on_set(algorithm, loader, device):\ndef eval_metrics(algorithm, loader, device, thress=[0.5], thress_suffix=['_50'], add_arrays=False):\ndef binary_metrics(targets, preds, label_set=[0, 1], suffix='', return_arrays=False):\ndef prob_metrics(targets, preds, label_set, return_arrays=False):\n CM = confusion_matrix(targets, preds, labels=label_set)\n CM = confusion_matrix(targets, preds, labels=label_set)"
},
{
"identifier": "InfiniteDataLoader",
"path": "dataset/fast_dataloader.py",
"snippet": "class InfiniteDataLoader:\n\n def __init__(self, dataset, weights, batch_size, num_workers):\n super().__init__()\n\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(\n weights, replacement=True, num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset, replacement=True)\n\n batch_sampler = torch.utils.data.BatchSampler(\n sampler,\n batch_size=batch_size,\n drop_last=True)\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n raise ValueError"
}
] | import argparse
import collections
import json
import os
import random
import sys
import time
import numpy as np
import pandas as pd
import PIL
import torch
import torchvision
import torch.utils.data
import pickle
import hparams_registry
import wandb
import hashlib
from tensorboard_logger import Logger
from pathlib import Path
from torch.utils.data import DataLoader
from dataset import datasets
from learning import algorithms, early_stopping, swad_utils
from utils import misc, eval_helper
from dataset.fast_dataloader import InfiniteDataLoader
from collections import OrderedDict | 7,344 | args = parser.parse_args()
start_step = 0
misc.prepare_folders(args)
output_dir = os.path.join(args.output_dir, args.store_name)
if not args.debug:
sys.stdout = misc.Tee(os.path.join(output_dir, 'out.txt'))
sys.stderr = misc.Tee(os.path.join(output_dir, 'err.txt'))
tb_logger = Logger(logdir=output_dir, flush_secs=2)
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
hparams.update({
'image_arch': args.image_arch,
'data_augmentation': args.aug,
'task': args.task,
'attr': args.attr,
'group_def': args.group_def
})
if args.log_online:
wandb.init(project='subpop_fairness', config={**vars(args), **hparams},
name=f"train_{args.dataset}_{args.task}_{args.algorithm}_{args.attr}_"
f"{hashlib.md5(str({**vars(args), **hparams}).encode('utf-8')).hexdigest()[:8]}_"
f"{os.environ['SLURM_JOB_ID'] if 'SLURM_JOB_ID' in os.environ else ''}")
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
with open(os.path.join(output_dir, 'args.json'), 'w') as f:
json.dump(vars(args), f, indent=4)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.multiprocessing.set_sharing_strategy('file_system')
device = "cuda" if torch.cuda.is_available() else "cpu"
def make_combined_dataset(names, sset, group_def, override_attr=None):
ind_datasets = []
for ds in names:
ind_datasets.append(vars(datasets)[ds](args.data_dir, sset, hparams, group_def=group_def, override_attr=override_attr))
return datasets.ConcatImageDataset(ind_datasets)
if len(args.dataset) == 1:
if args.dataset[0] in vars(datasets):
train_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'tr', hparams, group_def=args.group_def)
val_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'va', hparams, group_def='group')
test_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'te', hparams, group_def='group')
else:
raise NotImplementedError
else:
train_dataset = make_combined_dataset(args.dataset, 'tr', args.group_def)
val_dataset = make_combined_dataset(args.dataset, 'va', 'group')
test_dataset = make_combined_dataset(args.dataset, 'te', 'group')
if args.algorithm == 'DFR':
train_datasets = []
for ds in args.dataset:
train_datasets.append(vars(datasets)[ds](
args.data_dir, 'va', hparams, group_def=args.group_def, subsample_type='group'))
train_dataset = datasets.ConcatImageDataset(train_datasets)
elif args.algorithm == 'StratifiedERM':
assert args.stratified_erm_subset is not None
train_dataset = datasets.SubsetImageDataset(
train_dataset, idxs=np.argwhere(np.array(train_dataset.a) == args.stratified_erm_subset).squeeze())
val_dataset = datasets.SubsetImageDataset(
val_dataset, idxs=np.argwhere(np.array(val_dataset.a) == args.stratified_erm_subset).squeeze())
test_dataset = datasets.SubsetImageDataset(
test_dataset, idxs=np.argwhere(np.array(test_dataset.a) == args.stratified_erm_subset).squeeze())
num_workers = train_dataset.N_WORKERS
input_shape = train_dataset.INPUT_SHAPE
num_labels = train_dataset.num_labels
num_attributes = train_dataset.num_attributes
data_type = train_dataset.data_type
n_steps = args.steps or train_dataset.N_STEPS
checkpoint_freq = args.checkpoint_freq or train_dataset.CHECKPOINT_FREQ
hparams.update({
"steps": n_steps
})
print(f"Dataset:\n\t[train]\t{len(train_dataset)}"
f"\n\t[val]\t{len(val_dataset)}")
if hparams['group_balanced']:
        # if attribute is not available, groups degenerate to classes
train_weights = np.asarray(train_dataset.weights_g)
train_weights /= np.sum(train_weights)
elif hparams['attr_balanced']:
train_weights = np.asarray(train_dataset.weights_a)
train_weights /= np.sum(train_weights)
else:
train_weights = None
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Shortcut Learning in Chest X-rays')
# training
parser.add_argument('--store_name', type=str, default='debug')
parser.add_argument('--dataset', type=str, default=["MIMIC"], nargs='+')
parser.add_argument('--task', type=str, default="No Finding", choices=datasets.TASKS + datasets.ATTRS)
parser.add_argument('--attr', type=str, default="sex", choices=datasets.ATTRS)
parser.add_argument('--group_def', type=str, default="group", choices=['group', 'label'])
parser.add_argument('--algorithm', type=str, default="ERM", choices=algorithms.ALGORITHMS)
# others
parser.add_argument('--output_dir', type=str, default='output')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--hparams', type=str, help='JSON-serialized hparams dict')
parser.add_argument('--hparams_seed', type=int, default=0, help='Seed for random hparams (0 for "default hparams")')
parser.add_argument('--seed', type=int, default=0, help='Seed for everything else')
parser.add_argument('--steps', type=int, default=None)
parser.add_argument('--log_online', help='Log online using wandb', action='store_true')
parser.add_argument('--skip_ood_eval', help='skip evals on OOD datasets', action='store_true')
parser.add_argument('--log_all', help='Log all val metrics at each step to tb and wandb', action='store_true')
parser.add_argument('--stratified_erm_subset', type=int, default=None)
# two-stage related
parser.add_argument('--stage1_folder', type=str)
# early stopping
parser.add_argument('--use_es', action='store_true')
parser.add_argument('--es_strategy', choices=['metric'], default='metric')
parser.add_argument('--es_metric', type=str, default='min_group:accuracy')
parser.add_argument('--es_patience', type=int, default=5, help='Stop after this many checkpoints w/ no improvement')
# checkpoints
parser.add_argument('--resume', '-r', type=str, default='')
parser.add_argument('--checkpoint_freq', type=int, default=None, help='Checkpoint every N steps')
parser.add_argument('--skip_model_save', action='store_true')
parser.add_argument('--debug', action='store_true')
# architectures and pre-training sources
parser.add_argument('--image_arch', default='densenet_sup_in1k',
choices=['densenet_sup_in1k', 'resnet_sup_in1k', 'resnet_sup_in21k', 'resnet_simclr_in1k',
'resnet_barlow_in1k', 'vit_sup_in1k', 'vit_sup_in21k', 'vit_sup_swag', 'vit_clip_oai',
'vit_clip_laion', 'vit_dino_in1k', 'resnet_dino_in1k'])
# data augmentations
parser.add_argument('--aug', default='basic2',
choices=['none', 'basic', 'basic2', 'auto_aug', 'rand_aug', 'trivial_aug', 'augmix'])
args = parser.parse_args()
start_step = 0
misc.prepare_folders(args)
output_dir = os.path.join(args.output_dir, args.store_name)
if not args.debug:
sys.stdout = misc.Tee(os.path.join(output_dir, 'out.txt'))
sys.stderr = misc.Tee(os.path.join(output_dir, 'err.txt'))
tb_logger = Logger(logdir=output_dir, flush_secs=2)
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
hparams.update({
'image_arch': args.image_arch,
'data_augmentation': args.aug,
'task': args.task,
'attr': args.attr,
'group_def': args.group_def
})
if args.log_online:
wandb.init(project='subpop_fairness', config={**vars(args), **hparams},
name=f"train_{args.dataset}_{args.task}_{args.algorithm}_{args.attr}_"
f"{hashlib.md5(str({**vars(args), **hparams}).encode('utf-8')).hexdigest()[:8]}_"
f"{os.environ['SLURM_JOB_ID'] if 'SLURM_JOB_ID' in os.environ else ''}")
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
with open(os.path.join(output_dir, 'args.json'), 'w') as f:
json.dump(vars(args), f, indent=4)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.multiprocessing.set_sharing_strategy('file_system')
device = "cuda" if torch.cuda.is_available() else "cpu"
def make_combined_dataset(names, sset, group_def, override_attr=None):
ind_datasets = []
for ds in names:
ind_datasets.append(vars(datasets)[ds](args.data_dir, sset, hparams, group_def=group_def, override_attr=override_attr))
return datasets.ConcatImageDataset(ind_datasets)
if len(args.dataset) == 1:
if args.dataset[0] in vars(datasets):
train_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'tr', hparams, group_def=args.group_def)
val_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'va', hparams, group_def='group')
test_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'te', hparams, group_def='group')
else:
raise NotImplementedError
else:
train_dataset = make_combined_dataset(args.dataset, 'tr', args.group_def)
val_dataset = make_combined_dataset(args.dataset, 'va', 'group')
test_dataset = make_combined_dataset(args.dataset, 'te', 'group')
if args.algorithm == 'DFR':
train_datasets = []
for ds in args.dataset:
train_datasets.append(vars(datasets)[ds](
args.data_dir, 'va', hparams, group_def=args.group_def, subsample_type='group'))
train_dataset = datasets.ConcatImageDataset(train_datasets)
elif args.algorithm == 'StratifiedERM':
assert args.stratified_erm_subset is not None
train_dataset = datasets.SubsetImageDataset(
train_dataset, idxs=np.argwhere(np.array(train_dataset.a) == args.stratified_erm_subset).squeeze())
val_dataset = datasets.SubsetImageDataset(
val_dataset, idxs=np.argwhere(np.array(val_dataset.a) == args.stratified_erm_subset).squeeze())
test_dataset = datasets.SubsetImageDataset(
test_dataset, idxs=np.argwhere(np.array(test_dataset.a) == args.stratified_erm_subset).squeeze())
num_workers = train_dataset.N_WORKERS
input_shape = train_dataset.INPUT_SHAPE
num_labels = train_dataset.num_labels
num_attributes = train_dataset.num_attributes
data_type = train_dataset.data_type
n_steps = args.steps or train_dataset.N_STEPS
checkpoint_freq = args.checkpoint_freq or train_dataset.CHECKPOINT_FREQ
hparams.update({
"steps": n_steps
})
print(f"Dataset:\n\t[train]\t{len(train_dataset)}"
f"\n\t[val]\t{len(val_dataset)}")
if hparams['group_balanced']:
        # if attribute is not available, groups degenerate to classes
train_weights = np.asarray(train_dataset.weights_g)
train_weights /= np.sum(train_weights)
elif hparams['attr_balanced']:
train_weights = np.asarray(train_dataset.weights_a)
train_weights /= np.sum(train_weights)
else:
train_weights = None
| train_loader = InfiniteDataLoader( | 6 | 2023-12-15 04:10:31+00:00 | 12k |
RomGai/BrainVis | main.py | [
{
"identifier": "args",
"path": "args.py",
"snippet": ""
},
{
"identifier": "Dataset",
"path": "dataset.py",
"snippet": "class Dataset(Data.Dataset):\n def __init__(self, device, mode, data, wave_len):\n self.device = device\n self.datas, self.label ,self.clip,self.clip_moreinf = data\n self.mode = mode\n self.wave_len = wave_len\n self.__padding__()\n\n def __padding__(self):\n origin_len = self.datas[0].shape[0]\n if origin_len % self.wave_len:\n padding_len = self.wave_len - (origin_len % self.wave_len)\n padding = np.zeros((len(self.datas), padding_len, self.datas[0].shape[1]), dtype=np.float32)\n self.datas = np.concatenate([self.datas, padding], axis=-2)\n\n def __len__(self):\n return len(self.datas)\n\n def __getitem__(self, item):\n data = torch.tensor(self.datas[item]).to(self.device)\n label = self.label[item]\n clip=torch.tensor(self.clip[item]).to(self.device)\n clip_moreinf = torch.tensor(self.clip_moreinf[item]).to(self.device)\n\n return data, torch.tensor(label).to(self.device), clip,clip_moreinf\n\n def shape(self):\n return self.datas[0].shape"
},
{
"identifier": "Dataset_with_image_name",
"path": "dataset.py",
"snippet": "class Dataset_with_image_name(Data.Dataset):\n def __init__(self, device, mode, data, wave_len):\n self.device = device\n self.datas, self.label,self.img_name= data\n self.mode = mode\n self.wave_len = wave_len\n self.__padding__()\n\n def __padding__(self):\n origin_len = self.datas[0].shape[0]\n if origin_len % self.wave_len:\n padding_len = self.wave_len - (origin_len % self.wave_len)\n padding = np.zeros((len(self.datas), padding_len, self.datas[0].shape[1]), dtype=np.float32)\n self.datas = np.concatenate([self.datas, padding], axis=-2)\n\n def __len__(self):\n return len(self.datas)\n\n def __getitem__(self, item):\n data = torch.tensor(self.datas[item]).to(self.device)\n label = self.label[item]\n img_name=self.img_name[item]\n return data, torch.tensor(label).to(self.device),img_name#,clip\n\n def shape(self):\n return self.datas[0].shape"
},
{
"identifier": "TimeEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class TimeEncoder(nn.Module):\n def __init__(self, args):\n super(TimeEncoder, self).__init__()\n d_model = args.d_model\n self.d=d_model\n self.momentum = args.momentum\n self.linear_proba = True\n self.nocliptune=True\n self.device = args.device\n self.data_shape = args.data_shape\n self.max_len = int(self.data_shape[0] / args.wave_length)\n print(self.max_len)\n self.mask_len = int(args.mask_ratio * self.max_len)\n self.position = PositionalEmbedding(self.max_len, d_model)\n self.mask_token = nn.Parameter(torch.randn(d_model, ))\n self.input_projection = nn.Conv1d(args.data_shape[1], d_model, kernel_size=args.wave_length,\n stride=args.wave_length)\n self.encoder = TransformerEncoder(args)\n self.momentum_encoder = TransformerEncoder(args)\n self.tokenizer = Tokenizer(d_model, args.vocab_size)\n self.reg = Regressor(d_model, args.attn_heads, 4 * d_model, 1, args.reg_layers)\n self.predict_head = nn.Linear(d_model, args.num_class)\n self.channelmapping=ChannelMapping(self.max_len,77)\n self.dimmapping = nn.Linear(d_model, 768)\n self.apply(self._init_weights)\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n xavier_normal_(module.weight.data)\n if module.bias is not None:\n constant_(module.bias.data, 0.1)\n\n def copy_weight(self):\n with torch.no_grad():\n for (param_a, param_b) in zip(self.encoder.parameters(), self.momentum_encoder.parameters()):\n param_b.data = param_a.data\n\n def momentum_update(self):\n with torch.no_grad():\n for (param_a, param_b) in zip(self.encoder.parameters(), self.momentum_encoder.parameters()):\n param_b.data = self.momentum * param_b.data + (1 - self.momentum) * param_a.data\n\n def pretrain_forward(self, x):\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n tokens = self.tokenizer(x)\n\n x += self.position(x)\n\n rep_mask_token = self.mask_token.repeat(x.shape[0], x.shape[1], 1) + self.position(x)\n\n index = np.arange(x.shape[1])\n random.shuffle(index)\n v_index = index[:-self.mask_len]\n m_index = index[-self.mask_len:]\n visible = x[:, v_index, :]\n mask = x[:, m_index, :]\n tokens = tokens[:, m_index]\n\n rep_mask_token = rep_mask_token[:, m_index, :]\n\n rep_visible = self.encoder(visible)\n with torch.no_grad():\n rep_mask = self.momentum_encoder(mask)\n\n rep_mask_prediction = self.reg(rep_visible, rep_mask_token)\n token_prediction_prob = self.tokenizer.center(rep_mask_prediction)\n\n return [rep_mask, rep_mask_prediction], [token_prediction_prob, tokens]\n\n def forward(self, x):\n if self.linear_proba==True and self.nocliptune==True:\n #with torch.no_grad():\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n x += self.position(x)\n x = self.encoder(x)\n return torch.mean(x, dim=1)\n\n if self.linear_proba==False and self.nocliptune==True:\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n x += self.position(x)\n x = self.encoder(x)\n #lastrep=torch.mean(x, dim=1)\n lastrep=x\n xcls=self.predict_head(torch.mean(x, dim=1))\n return lastrep, torch.mean(x, dim=1), xcls\n\n if self.nocliptune == False: #CLIP\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n x += self.position(x)\n x = self.encoder(x)\n lastrep=torch.mean(x, dim=1)\n x=self.channelmapping(x)\n x = self.dimmapping(x)\n\n return lastrep#,x\n\n def get_tokens(self, x):\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n tokens = self.tokenizer(x)\n return tokens"
},
{
"identifier": "TimeFreqEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class TimeFreqEncoder(nn.Module):\n def __init__(self, pretrained_model_time,pretrained_model_freq,args):\n super(TimeFreqEncoder, self).__init__()\n\n self.pretrained_model_time = pretrained_model_time\n self.pretrained_model_time.nocliptune=True\n self.pretrained_model_time.linear_proba=False\n self.pretrained_model_freq=pretrained_model_freq\n\n self.fc01 =nn.Linear( args.d_model+128, args.num_class)\n\n def forward(self,x):\n lastrep,time_feature,cls=self.pretrained_model_time(x)\n lstmcls,freq_feature=self.pretrained_model_freq(x)\n x = torch.cat((time_feature, freq_feature), dim=1)\n\n lastrep = x\n encoded=x\n x = self.fc01(encoded)\n\n scores=x\n return lastrep,encoded,scores"
},
{
"identifier": "FreqEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class FreqEncoder(nn.Module):\n\n def __init__(self, input_size=128, lstm_size=128, lstm_layers=1, output_size=128):\n # Call parent\n super().__init__()\n # Define parameters\n self.input_size = input_size\n self.lstm_size = lstm_size\n self.lstm_layers = lstm_layers\n self.output_size = output_size\n\n # Define internal modules\n self.lstm = nn.LSTM(input_size, lstm_size, num_layers=lstm_layers, batch_first=True)\n self.output = nn.Linear(lstm_size, output_size)\n self.classifier = nn.Linear(output_size, 40)\n\n def forward(self, x):\n batch_size = x.size(0)\n x = x.permute(0, 2, 1)\n x = x.cpu()\n fourier_transform = np.fft.fft(x, axis=2)\n half_spectrum = fourier_transform[:, :, 1:440 // 2 + 1]\n amplitude_spectrum = np.abs(half_spectrum)\n\n amplitude_spectrum = torch.tensor(amplitude_spectrum).float()\n\n x = amplitude_spectrum.permute(0, 2, 1)\n x = x.to(\"cuda\")\n\n lstm_init = (torch.zeros(self.lstm_layers, batch_size, self.lstm_size),\n torch.zeros(self.lstm_layers, batch_size, self.lstm_size))\n if x.is_cuda: lstm_init = (lstm_init[0].cuda(), lstm_init[0].cuda())\n lstm_init = (Variable(lstm_init[0], volatile=x.volatile), Variable(lstm_init[1], volatile=x.volatile))\n\n x = self.lstm(x, lstm_init)[0][:, -1, :]\n reps = x\n # Forward output\n xa = F.relu(self.output(x))\n x = self.classifier(xa)\n return x, xa"
},
{
"identifier": "Trainer",
"path": "process.py",
"snippet": "class Trainer():\n def __init__(self, args, time_model, train_loader, train_linear_loader, test_loader, verbose=False):\n self.args = args\n self.verbose = verbose\n self.device = args.device\n self.print_process(self.device)\n self.model = time_model.to(torch.device(self.device))\n\n self.train_loader = train_loader\n #self.train_linear_loader = train_linear_loader\n self.train_linear_loader = train_loader\n self.test_loader = test_loader\n self.lr_decay = args.lr_decay_rate\n self.lr_decay_steps = args.lr_decay_steps\n\n self.cr = CE(self.model)\n self.alpha = args.alpha\n self.beta = args.beta\n\n self.test_cr = torch.nn.CrossEntropyLoss()\n self.num_epoch = args.num_epoch\n self.num_epoch_pretrain = args.num_epoch_pretrain\n self.eval_per_steps = args.eval_per_steps\n self.save_path = args.save_path\n\n self.step = 0\n self.best_metric = -1e9\n self.metric = 'acc'\n\n def pretrain(self):\n print('pretraining')\n self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.args.lr)\n eval_acc = 0\n align = Align()\n reconstruct = Reconstruct()\n self.model.copy_weight()\n for epoch in range(self.num_epoch_pretrain):\n print('Epoch:' + str(epoch+1))\n self.model.train()\n tqdm_dataloader = tqdm(self.train_loader)\n loss_sum = 0\n loss_mse = 0\n loss_ce = 0\n hits_sum = 0\n NDCG_sum = 0\n for idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n self.optimizer.zero_grad() # 梯度清零\n [rep_mask, rep_mask_prediction], [token_prediction_prob, tokens] = self.model.pretrain_forward(batch[0])\n align_loss = align.compute(rep_mask, rep_mask_prediction)\n loss_mse += align_loss.item()\n reconstruct_loss, hits, NDCG = reconstruct.compute(token_prediction_prob, tokens)\n loss_ce += reconstruct_loss.item()\n hits_sum += hits.item()\n NDCG_sum += NDCG\n loss = self.alpha * align_loss + self.beta * reconstruct_loss\n loss.backward()\n self.optimizer.step()\n self.model.momentum_update()\n loss_sum += loss.item()\n print('pretrain epoch{0}, loss{1}, mse{2}, ce{3}, hits{4}, ndcg{5}'.format(epoch + 1, loss_sum / (idx + 1),\n loss_mse / (idx + 1),\n loss_ce / (idx + 1), hits_sum,\n NDCG_sum / (idx + 1)))\n\n if (epoch + 1) % 20 == 0:\n torch.save(self.model.state_dict(), self.save_path + '/pretrain_model_epoch'+str(epoch+1)+'.pkl')\n\n if (epoch + 1) % 3 == 0:\n self.model.eval()\n train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)\n test_rep, test_label = get_rep_with_label(self.model, self.test_loader)\n clf = fit_lr(train_rep, train_label)\n acc = clf.score(test_rep, test_label)\n print(acc)\n if acc > eval_acc:\n eval_acc = acc\n torch.save(self.model.state_dict(), self.save_path + '/pretrain_model.pkl')\n # It is worth noting that the highest pretraining accuracy does not mean the model is the\n # best one for finetuning, so the one with larger training epoch should be used.\n\n def finetune(self):\n print('finetune')\n self.model.linear_proba = True\n #self.args.load_pretrained_model=False\n if self.args.load_pretrained_model:\n print('load pretrained model')\n state_dict = torch.load(self.save_path + '/pretrain_model_epoch300.pkl', map_location=self.device)\n try:\n self.model.load_state_dict(state_dict)\n except:\n model_state_dict = self.model.state_dict()\n for pretrain, random_intial in zip(state_dict, model_state_dict):\n assert pretrain == random_intial\n if pretrain in ['input_projection.weight', 'input_projection.bias', 'predict_head.weight',\n 'predict_head.bias', 'position.pe.weight']:\n state_dict[pretrain] 
= model_state_dict[pretrain]\n self.model.load_state_dict(state_dict)\n\n self.model.eval()\n train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)\n test_rep, test_label = get_rep_with_label(self.model, self.test_loader)\n clf = fit_lr(train_rep, train_label)\n acc = clf.score(test_rep, test_label)\n pred_label = np.argmax(clf.predict_proba(test_rep), axis=1)\n f1 = f1_score(test_label, pred_label, average='macro')\n print(acc, f1)\n\n self.model.linear_proba = False #If linear_proba = True, freeze pretrained model, train only classifier\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)\n self.scheduler = LambdaLR(self.optimizer, lr_lambda=lambda step: self.lr_decay ** step, verbose=self.verbose)\n\n for epoch in range(self.num_epoch):\n loss_epoch, time_cost = self._train_one_epoch()\n self.print_process(\n 'Finetune epoch:{0},loss:{1},training_time:{2}'.format(epoch + 1, loss_epoch, time_cost))\n\n if (epoch + 1) % 5 == 0:\n torch.save(self.model.state_dict(),\n self.save_path + '/finetune_model_epoch' + str(epoch + 1) + '.pkl')\n\n self.print_process(self.best_metric)\n return self.best_metric\n\n def _train_one_epoch(self):\n t0 = time.perf_counter()\n self.model.train()\n tqdm_dataloader = tqdm(self.train_linear_loader) if self.verbose else self.train_linear_loader\n loss_sum = 0\n pos=0\n for idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n self.optimizer.zero_grad()\n l1=l1_regularization(self.model,0.000003)\n loss = self.cr.computeft(batch)#+l1\n loss_sum += loss.item()\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)\n self.optimizer.step()\n pos=pos+1\n self.step += 1\n # if self.step % self.eval_per_steps == 0:\n metric = self.eval_model()\n self.print_process(metric)\n\n if metric[self.metric] >= self.best_metric:\n torch.save(self.model.state_dict(), self.save_path + '/finetune_model.pkl')\n self.best_metric = metric[self.metric]\n self.model.train()\n\n return loss_sum / (idx + 1), time.perf_counter() - t0\n\n def eval_model(self):\n self.model.eval()\n tqdm_data_loader = tqdm(self.test_loader) if self.verbose else self.test_loader\n metrics = {'acc': 0, 'f1': 0}\n pred = []\n label = []\n test_loss = 0\n\n with torch.no_grad():\n for idx, batch in enumerate(tqdm_data_loader):\n batch = [x.to(self.device) for x in batch]\n ret = self.compute_metrics(batch)\n if len(ret) == 2:\n pred_b, label_b = ret\n pred += pred_b\n label += label_b\n else:\n pred_b, label_b, test_loss_b = ret\n pred += pred_b\n label += label_b\n test_loss += test_loss_b.cpu().item()\n print(\"aaa\")\n print(len(label))\n confusion_mat = self._confusion_mat(label, pred)\n self.print_process(confusion_mat)\n if self.args.num_class == 2:\n metrics['f1'] = f1_score(y_true=label, y_pred=pred)\n metrics['precision'] = precision_score(y_true=label, y_pred=pred)\n metrics['recall'] = recall_score(y_true=label, y_pred=pred)\n else:\n metrics['f1'] = f1_score(y_true=label, y_pred=pred, average='macro')\n metrics['micro_f1'] = f1_score(y_true=label, y_pred=pred, average='micro')\n metrics['acc'] = accuracy_score(y_true=label, y_pred=pred)\n metrics['test_loss'] = test_loss / (idx + 1)\n return metrics\n\n def compute_metrics(self, batch):\n seqs, label, clip, clip_moreinf = batch\n lastrep, rep,scores = self.model(seqs)\n _, pred = torch.topk(scores, 1)\n test_loss = self.test_cr(scores, label.view(-1).long())\n pred = pred.view(-1).tolist()\n return pred, label.tolist(), test_loss\n\n 
def compute_metrics_freq(self, batch,model):\n #if len(batch) == 2:\n seqs, label,clip,clip_moreinf = batch\n lastrep, rep,scores = model(seqs)\n #else:\n # seqs1, seqs2, label = batch\n # lastrep, rep, scores = self.model((seqs1, seqs2))\n _, pred = torch.topk(scores, 1)\n #print(np.shape(scores))\n test_loss = self.test_cr(scores, label.view(-1).long())\n pred = pred.view(-1).tolist()\n return pred, label.tolist(), test_loss\n\n def _confusion_mat(self, label, pred):\n mat = np.zeros((self.args.num_class, self.args.num_class))\n for _label, _pred in zip(label, pred):\n mat[_label, _pred] += 1\n return mat\n\n def print_process(self, *x):\n if self.verbose:\n print(*x)\n\n def cont_pretrain(self):\n start_epoch=300\n state_dict = torch.load(self.save_path + '/pretrain_model_epoch300.pkl', map_location=self.device)\n eval_acc=0.0 # It should be modified.\n self.model.load_state_dict(state_dict)\n print('cont_pretraining')\n self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.args.lr)\n align = Align()\n reconstruct = Reconstruct()\n self.model.copy_weight()\n\n for epoch in range(self.num_epoch_pretrain):\n if(epoch<start_epoch):\n continue\n print('Epoch:' + str(epoch + 1))\n self.model.train()\n tqdm_dataloader = tqdm(self.train_loader)\n loss_sum = 0\n loss_mse = 0\n loss_ce = 0\n hits_sum = 0\n NDCG_sum = 0\n for idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n self.optimizer.zero_grad()\n [rep_mask, rep_mask_prediction], [token_prediction_prob, tokens] = self.model.pretrain_forward(batch[0])\n align_loss = align.compute(rep_mask, rep_mask_prediction)\n loss_mse += align_loss.item()\n reconstruct_loss, hits, NDCG = reconstruct.compute(token_prediction_prob, tokens)\n loss_ce += reconstruct_loss.item()\n hits_sum += hits.item()\n NDCG_sum += NDCG\n loss = self.alpha * align_loss + self.beta * reconstruct_loss\n loss.backward()\n self.optimizer.step()\n self.model.momentum_update()\n loss_sum += loss.item()\n print('pretrain epoch{0}, loss{1}, mse{2}, ce{3}, hits{4}, ndcg{5}'.format(epoch + 1, loss_sum / (idx + 1),\n loss_mse / (idx + 1),\n loss_ce / (idx + 1), hits_sum,\n NDCG_sum / (idx + 1)))\n\n if (epoch + 1) % 10 == 0:\n torch.save(self.model.state_dict(), self.save_path + '/pretrain_model_epoch'+str(epoch+1)+'.pkl')\n\n if (epoch + 1) % 3 == 0:\n self.model.eval()\n train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)\n test_rep, test_label = get_rep_with_label(self.model, self.test_loader)\n clf = fit_lr(train_rep, train_label)\n acc = clf.score(test_rep, test_label)\n print(acc)\n if acc > eval_acc:\n eval_acc = acc\n torch.save(self.model.state_dict(), self.save_path + '/pretrain_model.pkl')\n\n def finetune_CLIP(self):\n eval_cosine = 0.0\n freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value) for\n (key, value) in [x.split(\"=\") for x in opt.model_params]}\n freq_model = FreqEncoder(**freq_model_options)\n\n self.timefreq_model=TimeFreqEncoder(self.model,freq_model,self.args)\n self.timefreq_model = self.timefreq_model.to(torch.device(self.device))\n\n freqtime_state_dict = torch.load(self.save_path + '/timefreqmodel.pkl', map_location=self.device)\n\n self.timefreq_model.load_state_dict(freqtime_state_dict)\n\n self.timefreq_model.to(torch.device(\"cpu\"))\n\n freq_size=freq_model.output_size\n time_size=self.model.d\n clip_size=int(77*768)\n\n self.alignmodel=AlignNet(time_size,freq_size,clip_size,self.timefreq_model)\n 
self.alignmodel=self.alignmodel.to(torch.device(self.device))\n print('CLIP_finetune')\n self.optimizer = torch.optim.AdamW(self.alignmodel.parameters(), lr=self.args.lr)\n CLIPloss = CM()\n align=Align()\n\n for epoch in range(500):\n print('Epoch:' + str(epoch + 1))\n self.alignmodel.train()\n tqdm_dataloader = tqdm(self.train_loader)\n test_tqdm_dataloader=tqdm(self.test_loader)\n loss_clip=0\n loss_mse=0\n loss_clip_moreinf=0\n loss_mse_moreinf=0\n loss_sum = 0\n\n teloss_clip=0\n teloss_mse=0\n teloss_clip_moreinf=0\n teloss_mse_moreinf=0\n teloss_sum = 0\n\n for idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n self.optimizer.zero_grad()\n\n clippred = self.alignmodel.forward(batch[0].float())\n CLIP_loss = CLIPloss.compute(clippred.float(), batch[2].float())\n CLIP_loss_moreinf=CLIPloss.compute(clippred.float(), batch[3].float())\n align_loss=align.compute(clippred.float(), batch[2].float())\n align_loss_moreinf = align.compute(clippred.float(), batch[3].float())\n All_CLIP_loss=CLIP_loss+CLIP_loss_moreinf\n All_align_loss=align_loss+align_loss_moreinf\n\n loss_clip+= CLIP_loss.item()\n loss_mse+= align_loss.item()\n loss_clip_moreinf+= CLIP_loss_moreinf.item()\n loss_mse_moreinf+= align_loss_moreinf.item() #MSE, due to numerical considerations\n # lambda_value = 0.000002\n # l1_penalty = l1_regularization(self.model, lambda_value)\n loss = All_align_loss+All_CLIP_loss#+l1_penalty\n loss.backward()\n self.optimizer.step()\n loss_sum += loss.item()\n\n trloss=loss_sum / (idx + 1)\n trmse=loss_mse / (idx + 1)\n trmse_moreinf=loss_mse_moreinf / (idx + 1)\n trcosine=loss_clip / (idx + 1)\n trcosine_moreinf=loss_clip_moreinf / (idx + 1)\n\n for idxte, batch in enumerate(test_tqdm_dataloader):\n self.alignmodel.eval()\n batch = [x.to(self.device) for x in batch]\n clippred= self.alignmodel(batch[0])\n CLIP_loss = CLIPloss.compute(clippred, batch[2])\n CLIP_loss_moreinf = CLIPloss.compute(clippred.float(), batch[3].float())\n align_loss=align.compute(clippred, batch[2])\n align_loss_moreinf = align.compute(clippred.float(), batch[3].float())\n All_CLIP_loss = CLIP_loss + CLIP_loss_moreinf\n All_align_loss = align_loss + align_loss_moreinf\n\n teloss_clip+= CLIP_loss.item()\n teloss_mse+= align_loss.item()\n teloss_clip_moreinf+= CLIP_loss_moreinf.item()\n teloss_mse_moreinf+= align_loss_moreinf.item()\n teloss = All_align_loss+All_CLIP_loss\n teloss_sum += teloss.item()\n\n teloss = teloss_sum / (idxte + 1)\n temse = teloss_mse / (idxte + 1)\n tecosine = teloss_clip / (idxte + 1)\n temse_moreinf = teloss_mse_moreinf / (idxte + 1)\n tecosine_moreinf = teloss_clip_moreinf / (idxte + 1)\n\n print('clip_finetune epoch{0}, trloss{1}, trmse{2}, trcosine{3},trmse_moreinf{4},trcosine_moreinf{5}, '\n 'teloss{6}, temse{7}, tecosine{8},temse_moreinf{9}, tecosine_moreinf{10}'.format(epoch + 1,\n trloss,trmse,trcosine,trmse_moreinf,trcosine_moreinf,teloss,temse,tecosine,temse_moreinf,tecosine_moreinf))\n\n\n if (epoch + 1) % 10 == 0:\n torch.save(self.alignmodel.state_dict(),\n self.save_path + '/clipfinetune_model_epoch' + str(epoch + 1) + 'MAEchange.pkl')\n\n if tecosine > eval_cosine:\n eval_cosine = tecosine\n torch.save(self.alignmodel.state_dict(), self.save_path + '/clipfinetune_model.pkl')\n\n def finetune_timefreq(self):\n time_state_dict = torch.load(self.save_path + '/finetune_model_epoch80.pkl',\n map_location=self.device)\n print(\"freq_train\")\n\n self.model.load_state_dict(time_state_dict)\n\n self.model.eval()\n 
self.model.to(torch.device(\"cuda\"))\n train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)\n test_rep, test_label = get_rep_with_label(self.model, self.test_loader)\n clf = fit_lr(train_rep, train_label)\n acc = clf.score(test_rep, test_label)\n pred_label = np.argmax(clf.predict_proba(test_rep), axis=1)\n f1 = f1_score(test_label, pred_label, average='macro')\n print(acc, f1)\n self.model.train()\n self.model.to(torch.device(\"cpu\"))\n\n freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value) for\n (key, value) in [x.split(\"=\") for x in opt.model_params]}\n freq_model = FreqEncoder(**freq_model_options)\n\n if opt.pretrained_net != '':\n freq_model = torch.load(opt.pretrained_net)\n\n self.timefreq_model=TimeFreqEncoder(self.model,freq_model,self.args)\n self.timefreq_model = self.timefreq_model.to(torch.device(self.device))\n\n self.optimizer = torch.optim.AdamW(self.timefreq_model.parameters(), lr=self.args.lr)\n cr_freq = CE(self.timefreq_model)\n eval_acc = 0\n\n for epoch in range(50):\n print('Epoch:' + str(epoch + 1))\n self.timefreq_model.train()\n tqdm_dataloader = tqdm(self.train_loader)\n test_tqdm_dataloader=tqdm(self.test_loader)\n loss_sum = 0\n\n for idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n self.optimizer.zero_grad()\n loss=cr_freq.computefreq(batch)\n loss.backward()\n self.optimizer.step()\n loss_sum += loss.item()\n\n trloss=loss_sum / (idx + 1)\n\n metrics = {'acc': 0, 'f1': 0}\n pred = []\n label = []\n test_loss = 0\n\n for idxte, batch in enumerate(test_tqdm_dataloader):\n self.timefreq_model.eval()\n batch = [x.to(self.device) for x in batch]\n ret = self.compute_metrics_freq(batch,self.timefreq_model)\n if len(ret) == 2:\n pred_b, label_b = ret\n pred += pred_b\n label += label_b\n else:\n pred_b, label_b, test_loss_b = ret\n pred += pred_b\n label += label_b\n test_loss += test_loss_b.cpu().item()\n confusion_mat = self._confusion_mat(label, pred)\n self.print_process(confusion_mat)\n\n if self.args.num_class == 2:\n metrics['f1'] = f1_score(y_true=label, y_pred=pred)\n metrics['precision'] = precision_score(y_true=label, y_pred=pred)\n metrics['recall'] = recall_score(y_true=label, y_pred=pred)\n else:\n metrics['f1'] = f1_score(y_true=label, y_pred=pred, average='macro')\n metrics['micro_f1'] = f1_score(y_true=label, y_pred=pred, average='micro')\n metrics['acc'] = accuracy_score(y_true=label, y_pred=pred)\n metrics['test_loss'] = test_loss / (idx + 1)\n\n print('timefreq_finetune epoch{0}, trloss{1}, teloss{2},teacc{3}'.format(epoch + 1, trloss, metrics['test_loss'],metrics['acc']))\n\n if (epoch + 1) % 5 == 0:\n torch.save(self.timefreq_model.state_dict(),\n self.save_path + '/timefreqmodel_epoch' + str(epoch + 1) + '.pkl')\n\n if metrics['acc'] > eval_acc:\n eval_acc = metrics['acc']\n torch.save(self.timefreq_model.state_dict(), self.save_path + '/timefreqmodel.pkl')"
},
{
"identifier": "fit_lr",
"path": "classification.py",
"snippet": "def fit_lr(features, y):\n pipe = make_pipeline(\n StandardScaler(),\n LogisticRegression(\n random_state=3407,\n max_iter=1000000,\n multi_class='ovr'\n )\n )\n pipe.fit(features, y)\n return pipe"
},
{
"identifier": "get_rep_with_label",
"path": "classification.py",
"snippet": "def get_rep_with_label(model, dataloader):\n reps = []\n labels = []\n with torch.no_grad():\n for batch in tqdm(dataloader):\n seq, label,clip,clip_moreinf = batch\n seq = seq.to(args.device)\n labels += label.cpu().numpy().tolist()\n rep = model(seq)\n reps += rep.cpu().numpy().tolist()\n return reps, labels"
},
{
"identifier": "get_rep_with_label_with_image_name",
"path": "classification.py",
"snippet": "def get_rep_with_label_with_image_name(model, dataloader):\n reps = []\n clips=[]\n last_reps=[]\n labels = []\n preds= []\n seqs=[]\n scores= []\n image_names=[]\n with torch.no_grad():\n for batch in tqdm(dataloader):\n seq, label,image_name = batch\n seqs+=seq.cpu().numpy().tolist()\n seq = seq.to(args.device)\n labels += label.cpu().numpy().tolist()\n rep, encoded,score = model(seq)\n reps+=rep.cpu().numpy().tolist()\n image_names+=list(image_name)\n _, pred = torch.topk(score, 1)\n preds += pred.cpu().numpy().tolist()\n acc=accuracy_score(y_true=labels, y_pred=preds)\n print(\"testortrainacc\")\n print(acc)\n\n return labels,image_names, preds,seqs,reps,acc"
}
] | import warnings
import torch.utils.data as Data
import argparse
import torch; torch.utils.backcompat.broadcast_warning.enabled = True
import torch.optim
import torch.backends.cudnn as cudnn; cudnn.benchmark = True
import numpy as np
from args import args, Test_data, Train_data_all, Train_data, Train_data_all_with_image_name, Train_data_with_image_name, Test_data_with_image_name
from dataset import Dataset,Dataset_with_image_name
from model.BrainVisModels import TimeEncoder,TimeFreqEncoder,FreqEncoder
from process import Trainer
from classification import fit_lr, get_rep_with_label,get_rep_with_label_with_image_name
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score | 9,007 | warnings.filterwarnings('ignore')
parser = argparse.ArgumentParser(description="Template")
parser.add_argument('-mp','--model_params', default='', nargs='*', help='list of key=value pairs of model options')
# Parse arguments
opt = parser.parse_args()
def main():
## Save data to local path
## Reduce the data load time on server for other training steps
# with open("data/EEG_divided/Train_data_all.pkl", "wb") as f:
# pickle.dump(Train_data_all,f)
#
# with open("data/EEG_divided/Train_data.pkl", "wb") as j:
# pickle.dump(Train_data,j)
#
# with open("data/EEG_divided/Test_data.pkl", "wb") as k:
# pickle.dump(Test_data,k)
torch.set_num_threads(12)
torch.cuda.manual_seed(3407)
train_dataset = Dataset(device=args.device, mode='pretrain', data=Train_data_all, wave_len=args.wave_length)
train_loader = Data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)
args.data_shape = train_dataset.shape()
train_linear_dataset = Dataset(device=args.device, mode='supervise_train', data=Train_data, wave_len=args.wave_length)
train_linear_loader = Data.DataLoader(train_linear_dataset, batch_size=args.train_batch_size, shuffle=True)
test_dataset = Dataset(device=args.device, mode='test', data=Test_data, wave_len=args.wave_length)
test_loader = Data.DataLoader(test_dataset, batch_size=args.test_batch_size)
all_train_linear_dataset_with_image_name = Dataset_with_image_name(device=args.device, mode='supervise_train', data=Train_data_all_with_image_name, wave_len=args.wave_length)
all_train_linear_loader_with_image_name = Data.DataLoader(all_train_linear_dataset_with_image_name, batch_size=args.train_batch_size)
test_dataset_with_image_name = Dataset_with_image_name(device=args.device, mode='test', data=Test_data_with_image_name, wave_len=args.wave_length)
test_loader_with_image_name = Data.DataLoader(test_dataset_with_image_name, batch_size=args.test_batch_size)
print(args.data_shape)
print('dataset initial ends')
time_model = TimeEncoder(args)
print('model initial ends')
trainer = Trainer(args, time_model, train_loader, train_linear_loader, test_loader, verbose=True)
train_mode=True #True for training, False for the export of test data for image generation
if train_mode:
trainer.pretrain()
#trainer.cont_pretrain()
#trainer.finetune()
## Start from this step, to finetune on single subject, please modify the 'datautils.py'.
#trainer.finetune_timefreq()
#trainer.finetune_CLIP()
else:
## We suggest exporting data by single subject
timeE = TimeEncoder(args).to("cuda")
freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value)
for (key, value) in [x.split("=") for x in opt.model_params]}
# Create discriminator model
| warnings.filterwarnings('ignore')
parser = argparse.ArgumentParser(description="Template")
parser.add_argument('-mp','--model_params', default='', nargs='*', help='list of key=value pairs of model options')
# Parse arguments
opt = parser.parse_args()
def main():
## Save data to local path
## Reduce the data load time on server for other training steps
# with open("data/EEG_divided/Train_data_all.pkl", "wb") as f:
# pickle.dump(Train_data_all,f)
#
# with open("data/EEG_divided/Train_data.pkl", "wb") as j:
# pickle.dump(Train_data,j)
#
# with open("data/EEG_divided/Test_data.pkl", "wb") as k:
# pickle.dump(Test_data,k)
torch.set_num_threads(12)
torch.cuda.manual_seed(3407)
train_dataset = Dataset(device=args.device, mode='pretrain', data=Train_data_all, wave_len=args.wave_length)
train_loader = Data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)
args.data_shape = train_dataset.shape()
train_linear_dataset = Dataset(device=args.device, mode='supervise_train', data=Train_data, wave_len=args.wave_length)
train_linear_loader = Data.DataLoader(train_linear_dataset, batch_size=args.train_batch_size, shuffle=True)
test_dataset = Dataset(device=args.device, mode='test', data=Test_data, wave_len=args.wave_length)
test_loader = Data.DataLoader(test_dataset, batch_size=args.test_batch_size)
all_train_linear_dataset_with_image_name = Dataset_with_image_name(device=args.device, mode='supervise_train', data=Train_data_all_with_image_name, wave_len=args.wave_length)
all_train_linear_loader_with_image_name = Data.DataLoader(all_train_linear_dataset_with_image_name, batch_size=args.train_batch_size)
test_dataset_with_image_name = Dataset_with_image_name(device=args.device, mode='test', data=Test_data_with_image_name, wave_len=args.wave_length)
test_loader_with_image_name = Data.DataLoader(test_dataset_with_image_name, batch_size=args.test_batch_size)
print(args.data_shape)
print('dataset initial ends')
time_model = TimeEncoder(args)
print('model initial ends')
trainer = Trainer(args, time_model, train_loader, train_linear_loader, test_loader, verbose=True)
train_mode=True #True for training, False for the export of test data for image generation
if train_mode:
trainer.pretrain()
#trainer.cont_pretrain()
#trainer.finetune()
## Start from this step, to finetune on single subject, please modify the 'datautils.py'.
#trainer.finetune_timefreq()
#trainer.finetune_CLIP()
else:
## We suggest exporting data by single subject
timeE = TimeEncoder(args).to("cuda")
freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value)
for (key, value) in [x.split("=") for x in opt.model_params]}
# Create discriminator model | freq_model = FreqEncoder(**freq_model_options) | 5 | 2023-12-16 12:52:14+00:00 | 12k |
tonnetonne814/PL-Bert-VITS2 | train_ms.py | [
{
"identifier": "DistributedBucketSampler",
"path": "data_utils.py",
"snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n i=0\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size"
},
{
"identifier": "TextAudioSpeakerCollate",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n # sid = 1\n max_bert_len = max([x[4].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n bert_lengths = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n bert_padded = torch.FloatTensor(len(batch), 13, max_bert_len, 768)\n\n text_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n bert = row[4]\n bert_padded[i, :, :bert.size(1),:] = bert\n bert_lengths[i] = bert.size(1)\n\n\n if self.return_ids:\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n ids_sorted_decreasing,\n )\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n )"
},
{
"identifier": "TextAudioSpeakerLoader",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.hparams = hparams\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.text_cleaners = hparams.text_cleaners\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 999)\n self.min_audio_len = getattr(hparams, \"min_audio_len\", 8192)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n self.count = 0\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n for data in self.audiopaths_sid_text:\n audiopath, sid, ph, text, bert, emo, style = data\n if not os.path.isfile(audiopath):\n continue\n if self.min_text_len <= len(text) and len(text) <= self.max_text_len:\n audiopaths_sid_text_new.append([audiopath, sid, ph, text, bert, emo, style])\n length = os.path.getsize(audiopath) // (2 * self.hop_length)\n if length < self.min_audio_len // self.hop_length:\n print(\"DATA PASS\")\n continue\n lengths.append(length)\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n print(f\"INFO:{len(self.audiopaths_sid_text)} is used as Training Dataset.\")\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, ph, text, pl_bert, emo, style = (\n audiopath_sid_text[0],\n audiopath_sid_text[1],\n audiopath_sid_text[2],\n audiopath_sid_text[3],\n audiopath_sid_text[4],\n audiopath_sid_text[5],\n audiopath_sid_text[6],\n )\n ph = self.get_text(ph)\n spec, wav = self.get_audio(audiopath)\n bert = self.get_pl_bert(pl_bert)\n sid = self.get_sid(sid)\n\n # parameter checker \n assert len(ph) == bert.size(1)\n\n return (ph, spec, wav, sid, bert)\n \n def get_pl_bert(self, filename):\n path = os.path.join(\"pl_bert_embeddings\", f\"{filename}.PlBertJa\")\n data = torch.load(path)\n if self.add_blank:\n L, T, H = data.shape\n new_data = torch.zeros(size=(L,2*T+1,H), dtype=data.dtype)\n for idx in range(T):\n target_idx = idx*2+1\n new_data[:, target_idx, :] = data[:, idx, :]\n data = new_data\n return data\n\n def get_audio(self, filename):\n # TODO : if linear spec exists convert to mel from existing linear spec\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate\n )\n )\n # audio_norm = audio / self.max_wav_value\n audio_norm = audio.unsqueeze(0)\n spec_filename = 
filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n \"\"\"TODO : (need verification)\n if linear spec exists convert to\n mel from existing linear spec (uncomment below lines)\"\"\"\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", \".spec.pt\")),\n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text):\n if self.cleaned_text:\n text_norm = cleaned_text_to_sequence(text)\n else:\n text_norm = text_to_sequence(text, self.text_cleaners)\n if self.add_blank:\n text_norm = commons.intersperse(text_norm, 0)\n text_norm = torch.LongTensor(text_norm)\n return text_norm\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)"
},
{
"identifier": "discriminator_loss",
"path": "losses.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "feature_loss",
"path": "losses.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2"
},
{
"identifier": "generator_loss",
"path": "losses.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "kl_loss",
"path": "losses.py",
"snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l"
},
{
"identifier": "mel_spectrogram_torch",
"path": "mel_processing.py",
"snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n if version.parse(torch.__version__) >= version.parse(\"2\"):\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n else:\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "spec_to_mel_torch",
"path": "mel_processing.py",
"snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec"
},
{
"identifier": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES",
"path": "models.py",
"snippet": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [\n \"dur_disc_1\",\n \"dur_disc_2\",\n]"
},
{
"identifier": "AVAILABLE_FLOW_TYPES",
"path": "models.py",
"snippet": "AVAILABLE_FLOW_TYPES = [\n \"pre_conv\",\n \"pre_conv2\",\n \"fft\",\n \"mono_layer_inter_residual\",\n \"mono_layer_post_residual\",\n]"
},
{
"identifier": "DurationDiscriminatorV1",
"path": "models.py",
"snippet": "class DurationDiscriminatorV1(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_1(x)\n # x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_2(x)\n # x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_1(x)\n # x = self.drop(x)\n x = self.conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_2(x)\n # x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs"
},
{
"identifier": "DurationDiscriminatorV2",
"path": "models.py",
"snippet": "class DurationDiscriminatorV2(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append([output_prob])\n\n return output_probs"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11, 17, 23, 37]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "SynthesizerTrn",
"path": "models.py",
"snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n bert_emb_size,\n n_speakers=0,\n gin_channels=0,\n use_sdp=True,\n **kwargs,\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", False\n )\n self.use_transformer_flows = kwargs.get(\"use_transformer_flows\", False)\n self.transformer_flow_type = kwargs.get(\n \"transformer_flow_type\", \"mono_layer_post_residual\"\n )\n if self.use_transformer_flows:\n assert (\n self.transformer_flow_type in AVAILABLE_FLOW_TYPES\n ), f\"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}\"\n self.use_sdp = use_sdp\n # self.use_duration_discriminator = kwargs.get(\"use_duration_discriminator\", False)\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n else:\n self.enc_gin_channels = 0\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n bert_emb_size=bert_emb_size,\n gin_channels=self.enc_gin_channels,\n )\n\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n # self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)\n self.flow = ResidualCouplingTransformersBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 4,\n gin_channels=gin_channels,\n use_transformer_flows=self.use_transformer_flows,\n transformer_flow_type=self.transformer_flow_type,\n )\n\n if use_sdp:\n self.dp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n else:\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n\n # 重み付け加算式を取る\n self.WSL = WeightSumLayer(n_layers=13)\n\n def forward(self, x, x_lengths, y, y_lengths, bert, bert_lengths, sid=None):\n bert = self.WSL(bert)\n\n if 
self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n if self.use_sdp:\n l_length = self.dp(x, x_mask, w, g=g)\n l_length = l_length / torch.sum(x_mask)\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n logw_ = torch.log(w + 1e-6) * x_mask\n else:\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n bert,\n bert_lengths,\n sid=None,\n noise_scale=1,\n length_scale=1,\n noise_scale_w=1.0,\n max_len=None,\n ):\n bert = self.WSL(bert)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n if self.use_sdp:\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)\n else:\n logw = self.dp(x, x_mask, g=g)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n # currently vits-2 is not capable of voice conversion\n ## comment - choihkk\n ## Assuming the use of the ResidualCouplingTransformersLayer2 module, it seems that voice conversion is possible \n def voice_conversion(self, y, 
y_lengths, sid_src, sid_tgt):\n assert self.n_speakers > 0, \"n_speakers have to be larger than 0.\"\n g_src = self.emb_g(sid_src).unsqueeze(-1)\n g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)\n z_p = self.flow(z, y_mask, g=g_src)\n z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)\n o_hat = self.dec(z_hat * y_mask, g=g_tgt)\n return o_hat, y_mask, (z, z_p, z_hat)"
},
{
"identifier": "symbols",
"path": "PL_BERT_ja/text/symbols.py",
"snippet": ""
}
] | import argparse
import itertools
import json
import math
import os
import logging
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
import commons
import models
import utils
from torch import nn, optim
from torch.cuda.amp import GradScaler, autocast
from torch.nn import functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from data_utils import (DistributedBucketSampler, TextAudioSpeakerCollate,
TextAudioSpeakerLoader)
from losses import discriminator_loss, feature_loss, generator_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from models import (AVAILABLE_DURATION_DISCRIMINATOR_TYPES,
AVAILABLE_FLOW_TYPES,
DurationDiscriminatorV1, DurationDiscriminatorV2,
MultiPeriodDiscriminator, SynthesizerTrn)
from PL_BERT_ja.text.symbols import symbols | 10,766 | ),
)
def run(rank, n_gpus, hps):
net_dur_disc = None
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(
backend="nccl", init_method="env://", world_size=n_gpus, rank=rank
)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
if (
"use_mel_posterior_encoder" in hps.model.keys()
and hps.model.use_mel_posterior_encoder == True
):
print("Using mel posterior encoder for VITS2")
posterior_channels = 128 # vits2
hps.data.use_mel_posterior_encoder = True
else:
print("Using lin posterior encoder for VITS1")
posterior_channels = hps.data.filter_length // 2 + 1
hps.data.use_mel_posterior_encoder = False
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 500, 700, 900, 1100, 1300, 1500, 3000],
num_replicas=n_gpus,
rank=rank,
shuffle=True,
)
collate_fn = TextAudioSpeakerCollate()
train_loader = DataLoader(
train_dataset,
num_workers=8,
shuffle=False,
pin_memory=True,
collate_fn=collate_fn,
batch_sampler=train_sampler,
)
if rank == 0:
eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
eval_loader = DataLoader(
eval_dataset,
num_workers=8,
shuffle=False,
batch_size=hps.train.batch_size,
pin_memory=True,
drop_last=False,
collate_fn=collate_fn,
)
# some of these flags are not being used in the code and directly set in hps json file.
# they are kept here for reference and prototyping.
if (
"use_transformer_flows" in hps.model.keys()
and hps.model.use_transformer_flows == True
):
use_transformer_flows = True
transformer_flow_type = hps.model.transformer_flow_type
print(f"Using transformer flows {transformer_flow_type} for VITS2")
assert (
transformer_flow_type in AVAILABLE_FLOW_TYPES
), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}"
else:
print("Using normal flows for VITS1")
use_transformer_flows = False
if (
"use_spk_conditioned_encoder" in hps.model.keys()
and hps.model.use_spk_conditioned_encoder == True
):
if hps.data.n_speakers == 0:
raise ValueError(
"n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model"
)
use_spk_conditioned_encoder = True
else:
print("Using normal encoder for VITS1")
use_spk_conditioned_encoder = False
if (
"use_noise_scaled_mas" in hps.model.keys()
and hps.model.use_noise_scaled_mas == True
):
print("Using noise scaled MAS for VITS2")
use_noise_scaled_mas = True
mas_noise_scale_initial = 0.01
noise_scale_delta = 2e-6
else:
print("Using normal MAS for VITS1")
use_noise_scaled_mas = False
mas_noise_scale_initial = 0.0
noise_scale_delta = 0.0
if (
"use_duration_discriminator" in hps.model.keys()
and hps.model.use_duration_discriminator == True
):
# print("Using duration discriminator for VITS2")
use_duration_discriminator = True
# comment - choihkk
# add duration discriminator type here
# I think it would be a good idea to come up with a method to input this part accurately, like a hydra
duration_discriminator_type = getattr(
hps.model, "duration_discriminator_type", "dur_disc_1"
)
print(f"Using duration_discriminator {duration_discriminator_type} for VITS2")
assert (
|
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
# from tensorboardX import SummaryWriter
torch.backends.cudnn.benchmark = True
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6060"
hps = utils.get_hparams()
mp.spawn(
run,
nprocs=n_gpus,
args=(
n_gpus,
hps,
),
)
def run(rank, n_gpus, hps):
net_dur_disc = None
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(
backend="nccl", init_method="env://", world_size=n_gpus, rank=rank
)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
if (
"use_mel_posterior_encoder" in hps.model.keys()
and hps.model.use_mel_posterior_encoder == True
):
print("Using mel posterior encoder for VITS2")
posterior_channels = 128 # vits2
hps.data.use_mel_posterior_encoder = True
else:
print("Using lin posterior encoder for VITS1")
posterior_channels = hps.data.filter_length // 2 + 1
hps.data.use_mel_posterior_encoder = False
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 500, 700, 900, 1100, 1300, 1500, 3000],
num_replicas=n_gpus,
rank=rank,
shuffle=True,
)
collate_fn = TextAudioSpeakerCollate()
train_loader = DataLoader(
train_dataset,
num_workers=8,
shuffle=False,
pin_memory=True,
collate_fn=collate_fn,
batch_sampler=train_sampler,
)
if rank == 0:
eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
eval_loader = DataLoader(
eval_dataset,
num_workers=8,
shuffle=False,
batch_size=hps.train.batch_size,
pin_memory=True,
drop_last=False,
collate_fn=collate_fn,
)
# some of these flags are not being used in the code and directly set in hps json file.
# they are kept here for reference and prototyping.
if (
"use_transformer_flows" in hps.model.keys()
and hps.model.use_transformer_flows == True
):
use_transformer_flows = True
transformer_flow_type = hps.model.transformer_flow_type
print(f"Using transformer flows {transformer_flow_type} for VITS2")
assert (
transformer_flow_type in AVAILABLE_FLOW_TYPES
), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}"
else:
print("Using normal flows for VITS1")
use_transformer_flows = False
if (
"use_spk_conditioned_encoder" in hps.model.keys()
and hps.model.use_spk_conditioned_encoder == True
):
if hps.data.n_speakers == 0:
raise ValueError(
"n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model"
)
use_spk_conditioned_encoder = True
else:
print("Using normal encoder for VITS1")
use_spk_conditioned_encoder = False
if (
"use_noise_scaled_mas" in hps.model.keys()
and hps.model.use_noise_scaled_mas == True
):
print("Using noise scaled MAS for VITS2")
use_noise_scaled_mas = True
mas_noise_scale_initial = 0.01
noise_scale_delta = 2e-6
else:
print("Using normal MAS for VITS1")
use_noise_scaled_mas = False
mas_noise_scale_initial = 0.0
noise_scale_delta = 0.0
if (
"use_duration_discriminator" in hps.model.keys()
and hps.model.use_duration_discriminator == True
):
# print("Using duration discriminator for VITS2")
use_duration_discriminator = True
# comment - choihkk
# add duration discriminator type here
# I think it would be a good idea to come up with a method to input this part accurately, like a hydra
duration_discriminator_type = getattr(
hps.model, "duration_discriminator_type", "dur_disc_1"
)
print(f"Using duration_discriminator {duration_discriminator_type} for VITS2")
assert ( | duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES | 9 | 2023-12-16 05:34:02+00:00 | 12k |
camenduru/FreeInit-hf | animatediff/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n 
hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = 
torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
},
{
"identifier": "InflatedGroupNorm",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedGroupNorm(nn.GroupNorm):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d, InflatedGroupNorm
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint | 7,711 | # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
        motion_module_resolutions = (1, 2, 4, 8),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
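        # Time-embedding width follows the diffusers 2D UNet convention of 4x the
        # first block's channel count (320 -> 1280 with the defaults above).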
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
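        # InflatedConv3d (see resnet.py snippet above) folds the frame axis into the
        # batch, so this is effectively a per-frame 2D convolution on 5D video tensors.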
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
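        # Broadcast scalar settings to one value per down block.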
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
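            # res is the cumulative downsampling factor at this stage (1, 2, 4, 8);
            # further below (outside this crop) it is checked against
            # motion_module_resolutions to gate motion-module insertion.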
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
        motion_module_resolutions = (1, 2, 4, 8),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
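        # Time-embedding width follows the diffusers 2D UNet convention of 4x the
        # first block's channel count (320 -> 1280 with the defaults above).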
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
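        # InflatedConv3d (see resnet.py snippet above) folds the frame axis into the
        # batch, so this is effectively a per-frame 2D convolution on 5D video tensors.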
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
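        # Broadcast scalar settings to one value per down block.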
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
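            # res is the cumulative downsampling factor at this stage (1, 2, 4, 8);
            # further below (outside this crop) it is checked against
            # motion_module_resolutions to gate motion-module insertion.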
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| down_block = get_down_block( | 5 | 2023-12-19 21:06:32+00:00 | 12k |
zyrant/SPGroup3D | tools/data_converter/indoor_converter.py | [
{
"identifier": "S3DISData",
"path": "tools/data_converter/s3dis_data_utils.py",
"snippet": "class S3DISData(object):\n \"\"\"S3DIS data.\n\n Generate s3dis infos for s3dis_converter.\n\n Args:\n root_path (str): Root path of the raw data.\n split (str, optional): Set split type of the data. Default: 'Area_1'.\n \"\"\"\n\n def __init__(self, root_path, split='Area_1'):\n self.root_dir = root_path\n self.split = split\n self.data_dir = osp.join(root_path,\n 'Stanford3dDataset_v1.2_Aligned_Version')\n\n # Following `GSDN <https://arxiv.org/abs/2006.12356>`_, use 5 furniture\n # classes for detection: table, chair, sofa, bookcase, board.\n self.cat_ids = np.array([7, 8, 9, 10, 11])\n self.cat_ids2class = {\n cat_id: i\n for i, cat_id in enumerate(list(self.cat_ids))\n }\n\n assert split in [\n 'Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6'\n ]\n self.sample_id_list = os.listdir(osp.join(self.data_dir,\n split)) # conferenceRoom_1\n for sample_id in self.sample_id_list:\n if os.path.isfile(osp.join(self.data_dir, split, sample_id)):\n self.sample_id_list.remove(sample_id)\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):\n \"\"\"Get data infos.\n\n This method gets information from the raw data.\n\n Args:\n num_workers (int, optional): Number of threads to be used.\n Default: 4.\n has_label (bool, optional): Whether the data has label.\n Default: True.\n sample_id_list (list[int], optional): Index list of the sample.\n Default: None.\n\n Returns:\n infos (list[dict]): Information of the raw data.\n \"\"\"\n\n def process_single_scene(sample_idx):\n print(f'{self.split} sample_idx: {sample_idx}')\n info = dict()\n pc_info = {\n 'num_features': 6,\n 'lidar_idx': f'{self.split}_{sample_idx}'\n }\n info['point_cloud'] = pc_info\n pts_filename = osp.join(self.root_dir, 's3dis_data',\n f'{self.split}_{sample_idx}_point.npy')\n pts_instance_mask_path = osp.join(\n self.root_dir, 's3dis_data',\n f'{self.split}_{sample_idx}_ins_label.npy')\n pts_semantic_mask_path = osp.join(\n self.root_dir, 's3dis_data',\n f'{self.split}_{sample_idx}_sem_label.npy')\n\n points = np.load(pts_filename).astype(np.float32)\n pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int)\n pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int)\n\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))\n\n ##########################superpoint#######################\n # superpoints_filename = osp.join(self.root_dir, 's3dis_data',\n # f'{self.split}_{sample_idx}_superpoint.npy')\n # superpoints = np.load(superpoints_filename)\n # mmcv.mkdir_or_exist(osp.join(self.root_dir, 'superpoints'))\n # superpoints.tofile(\n # osp.join(self.root_dir, 'superpoints', f'{self.split}_{sample_idx}.bin'))\n info['pts_superpoints_path'] = osp.join('superpoints', f'{self.split}_{sample_idx}.bin')\n ###########################################################\n\n points.tofile(\n osp.join(self.root_dir, 'points',\n f'{self.split}_{sample_idx}.bin'))\n pts_instance_mask.tofile(\n osp.join(self.root_dir, 'instance_mask',\n f'{self.split}_{sample_idx}.bin'))\n pts_semantic_mask.tofile(\n osp.join(self.root_dir, 'semantic_mask',\n f'{self.split}_{sample_idx}.bin'))\n\n info['pts_path'] = osp.join('points',\n f'{self.split}_{sample_idx}.bin')\n info['pts_instance_mask_path'] = osp.join(\n 'instance_mask', f'{self.split}_{sample_idx}.bin')\n info['pts_semantic_mask_path'] = osp.join(\n 
'semantic_mask', f'{self.split}_{sample_idx}.bin')\n info['annos'] = self.get_bboxes(points, pts_instance_mask,\n pts_semantic_mask)\n\n return info\n\n sample_id_list = sample_id_list if sample_id_list is not None \\\n else self.sample_id_list\n with futures.ThreadPoolExecutor(num_workers) as executor:\n infos = executor.map(process_single_scene, sample_id_list)\n return list(infos)\n\n def get_bboxes(self, points, pts_instance_mask, pts_semantic_mask):\n \"\"\"Convert instance masks to axis-aligned bounding boxes.\n\n Args:\n points (np.array): Scene points of shape (n, 6).\n pts_instance_mask (np.ndarray): Instance labels of shape (n,).\n pts_semantic_mask (np.ndarray): Semantic labels of shape (n,).\n\n Returns:\n dict: A dict containing detection infos with following keys:\n\n - gt_boxes_upright_depth (np.ndarray): Bounding boxes\n of shape (n, 6)\n - class (np.ndarray): Box labels of shape (n,)\n - gt_num (int): Number of boxes.\n \"\"\"\n bboxes, labels = [], []\n for i in range(1, pts_instance_mask.max() + 1):\n ids = pts_instance_mask == i\n # check if all instance points have same semantic label\n assert pts_semantic_mask[ids].min() == pts_semantic_mask[ids].max()\n label = pts_semantic_mask[ids][0]\n # keep only furniture objects\n if label in self.cat_ids2class:\n labels.append(self.cat_ids2class[pts_semantic_mask[ids][0]])\n pts = points[:, :3][ids]\n min_pts = pts.min(axis=0)\n max_pts = pts.max(axis=0)\n locations = (min_pts + max_pts) / 2\n dimensions = max_pts - min_pts\n bboxes.append(np.concatenate((locations, dimensions)))\n annotation = dict()\n # follow ScanNet and SUN RGB-D keys\n annotation['gt_boxes_upright_depth'] = np.array(bboxes)\n annotation['class'] = np.array(labels)\n annotation['gt_num'] = len(labels)\n return annotation"
},
{
"identifier": "S3DISSegData",
"path": "tools/data_converter/s3dis_data_utils.py",
"snippet": "class S3DISSegData(object):\n \"\"\"S3DIS dataset used to generate infos for semantic segmentation task.\n\n Args:\n data_root (str): Root path of the raw data.\n ann_file (str): The generated scannet infos.\n split (str, optional): Set split type of the data. Default: 'train'.\n num_points (int, optional): Number of points in each data input.\n Default: 8192.\n label_weight_func (function, optional): Function to compute the\n label weight. Default: None.\n \"\"\"\n\n def __init__(self,\n data_root,\n ann_file,\n split='Area_1',\n num_points=4096,\n label_weight_func=None):\n self.data_root = data_root\n self.data_infos = mmcv.load(ann_file)\n self.split = split\n self.num_points = num_points\n\n self.all_ids = np.arange(13) # all possible ids\n self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12]) # used for seg task\n self.ignore_index = len(self.cat_ids)\n\n self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \\\n self.ignore_index\n for i, cat_id in enumerate(self.cat_ids):\n self.cat_id2class[cat_id] = i\n\n # label weighting function is taken from\n # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24\n self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \\\n label_weight_func is None else label_weight_func\n\n def get_seg_infos(self):\n scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()\n save_folder = osp.join(self.data_root, 'seg_info')\n mmcv.mkdir_or_exist(save_folder)\n np.save(\n osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),\n scene_idxs)\n np.save(\n osp.join(save_folder, f'{self.split}_label_weight.npy'),\n label_weight)\n print(f'{self.split} resampled scene index and label weight saved')\n\n def _convert_to_label(self, mask):\n \"\"\"Convert class_id in loaded segmentation mask to label.\"\"\"\n if isinstance(mask, str):\n if mask.endswith('npy'):\n mask = np.load(mask)\n else:\n mask = np.fromfile(mask, dtype=np.int64)\n label = self.cat_id2class[mask]\n return label\n\n def get_scene_idxs_and_label_weight(self):\n \"\"\"Compute scene_idxs for data sampling and label weight for loss\n calculation.\n\n We sample more times for scenes with more points. Label_weight is\n inversely proportional to number of class points.\n \"\"\"\n num_classes = len(self.cat_ids)\n num_point_all = []\n label_weight = np.zeros((num_classes + 1, )) # ignore_index\n for data_info in self.data_infos:\n label = self._convert_to_label(\n osp.join(self.data_root, data_info['pts_semantic_mask_path']))\n num_point_all.append(label.shape[0])\n class_count, _ = np.histogram(label, range(num_classes + 2))\n label_weight += class_count\n\n # repeat scene_idx for num_scene_point // num_sample_point times\n sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))\n num_iter = int(np.sum(num_point_all) / float(self.num_points))\n scene_idxs = []\n for idx in range(len(self.data_infos)):\n scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))\n scene_idxs = np.array(scene_idxs).astype(np.int32)\n\n # calculate label weight, adopted from PointNet++\n label_weight = label_weight[:-1].astype(np.float32)\n label_weight = label_weight / label_weight.sum()\n label_weight = self.label_weight_func(label_weight).astype(np.float32)\n\n return scene_idxs, label_weight"
},
{
"identifier": "ScanNetData",
"path": "tools/data_converter/scannet_data_utils.py",
"snippet": "class ScanNetData(object):\n \"\"\"ScanNet data.\n\n Generate scannet infos for scannet_converter.\n\n Args:\n root_path (str): Root path of the raw data.\n split (str, optional): Set split type of the data. Default: 'train'.\n \"\"\"\n\n def __init__(self, root_path, split='train'):\n self.root_dir = root_path\n self.split = split\n self.split_dir = osp.join(root_path)\n self.classes = [\n 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin'\n ]\n self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}\n self.label2cat = {self.cat2label[t]: t for t in self.cat2label}\n self.cat_ids = np.array(\n [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])\n self.cat_ids2class = {\n nyu40id: i\n for i, nyu40id in enumerate(list(self.cat_ids))\n }\n assert split in ['train', 'val', 'test']\n split_file = osp.join(self.root_dir, 'meta_data',\n f'scannetv2_{split}.txt')\n mmcv.check_file_exist(split_file)\n self.sample_id_list = mmcv.list_from_file(split_file)\n self.test_mode = (split == 'test')\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def get_aligned_box_label(self, idx):\n box_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_aligned_bbox.npy')\n mmcv.check_file_exist(box_file)\n return np.load(box_file)\n\n def get_unaligned_box_label(self, idx):\n box_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_unaligned_bbox.npy')\n mmcv.check_file_exist(box_file)\n return np.load(box_file)\n\n def get_axis_align_matrix(self, idx):\n matrix_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_axis_align_matrix.npy')\n mmcv.check_file_exist(matrix_file)\n return np.load(matrix_file)\n\n def get_images(self, idx):\n paths = []\n path = osp.join(self.root_dir, 'posed_images', idx)\n for file in sorted(os.listdir(path)):\n if file.endswith('.jpg'):\n paths.append(osp.join('posed_images', idx, file))\n return paths\n\n def get_extrinsics(self, idx):\n extrinsics = []\n path = osp.join(self.root_dir, 'posed_images', idx)\n for file in sorted(os.listdir(path)):\n if file.endswith('.txt') and not file == 'intrinsic.txt':\n extrinsics.append(np.loadtxt(osp.join(path, file)))\n return extrinsics\n\n def get_intrinsics(self, idx):\n matrix_file = osp.join(self.root_dir, 'posed_images', idx,\n 'intrinsic.txt')\n mmcv.check_file_exist(matrix_file)\n return np.loadtxt(matrix_file)\n\n def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):\n \"\"\"Get data infos.\n\n This method gets information from the raw data.\n\n Args:\n num_workers (int, optional): Number of threads to be used.\n Default: 4.\n has_label (bool, optional): Whether the data has label.\n Default: True.\n sample_id_list (list[int], optional): Index list of the sample.\n Default: None.\n\n Returns:\n infos (list[dict]): Information of the raw data.\n \"\"\"\n\n def process_single_scene(sample_idx):\n print(f'{self.split} sample_idx: {sample_idx}')\n info = dict()\n pc_info = {'num_features': 6, 'lidar_idx': sample_idx}\n info['point_cloud'] = pc_info\n pts_filename = osp.join(self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_vert.npy')\n points = np.load(pts_filename)\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))\n points.tofile(\n osp.join(self.root_dir, 'points', f'{sample_idx}.bin'))\n info['pts_path'] = osp.join('points', f'{sample_idx}.bin')\n\n 
##########################superpoint#######################\n superpoints_filename = osp.join(self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_superpoint.npy')\n superpoints = np.load(superpoints_filename)\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'superpoints'))\n superpoints.tofile(\n osp.join(self.root_dir, 'superpoints', f'{sample_idx}.bin'))\n info['pts_superpoints_path'] = osp.join('superpoints', f'{sample_idx}.bin')\n ###########################################################\n\n # update with RGB image paths if exist\n if os.path.exists(osp.join(self.root_dir, 'posed_images')):\n info['intrinsics'] = self.get_intrinsics(sample_idx)\n all_extrinsics = self.get_extrinsics(sample_idx)\n all_img_paths = self.get_images(sample_idx)\n # some poses in ScanNet are invalid\n extrinsics, img_paths = [], []\n for extrinsic, img_path in zip(all_extrinsics, all_img_paths):\n if np.all(np.isfinite(extrinsic)):\n img_paths.append(img_path)\n extrinsics.append(extrinsic)\n info['extrinsics'] = extrinsics\n info['img_paths'] = img_paths\n\n if not self.test_mode:\n pts_instance_mask_path = osp.join(\n self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_ins_label.npy')\n pts_semantic_mask_path = osp.join(\n self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_sem_label.npy')\n\n pts_instance_mask = np.load(pts_instance_mask_path).astype(\n np.int64)\n pts_semantic_mask = np.load(pts_semantic_mask_path).astype(\n np.int64)\n\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))\n\n pts_instance_mask.tofile(\n osp.join(self.root_dir, 'instance_mask',\n f'{sample_idx}.bin'))\n pts_semantic_mask.tofile(\n osp.join(self.root_dir, 'semantic_mask',\n f'{sample_idx}.bin'))\n\n info['pts_instance_mask_path'] = osp.join(\n 'instance_mask', f'{sample_idx}.bin')\n info['pts_semantic_mask_path'] = osp.join(\n 'semantic_mask', f'{sample_idx}.bin')\n\n if has_label:\n annotations = {}\n # box is of shape [k, 6 + class]\n aligned_box_label = self.get_aligned_box_label(sample_idx)\n unaligned_box_label = self.get_unaligned_box_label(sample_idx)\n annotations['gt_num'] = aligned_box_label.shape[0]\n if annotations['gt_num'] != 0:\n aligned_box = aligned_box_label[:, :-1] # k, 6\n unaligned_box = unaligned_box_label[:, :-1]\n classes = aligned_box_label[:, -1] # k\n annotations['name'] = np.array([\n self.label2cat[self.cat_ids2class[classes[i]]]\n for i in range(annotations['gt_num'])\n ])\n # default names are given to aligned bbox for compatibility\n # we also save unaligned bbox info with marked names\n annotations['location'] = aligned_box[:, :3]\n annotations['dimensions'] = aligned_box[:, 3:6]\n annotations['gt_boxes_upright_depth'] = aligned_box\n annotations['unaligned_location'] = unaligned_box[:, :3]\n annotations['unaligned_dimensions'] = unaligned_box[:, 3:6]\n annotations[\n 'unaligned_gt_boxes_upright_depth'] = unaligned_box\n annotations['index'] = np.arange(\n annotations['gt_num'], dtype=np.int32)\n annotations['class'] = np.array([\n self.cat_ids2class[classes[i]]\n for i in range(annotations['gt_num'])\n ])\n axis_align_matrix = self.get_axis_align_matrix(sample_idx)\n annotations['axis_align_matrix'] = axis_align_matrix # 4x4\n info['annos'] = annotations\n return info\n\n sample_id_list = sample_id_list if sample_id_list is not None \\\n else self.sample_id_list\n with futures.ThreadPoolExecutor(num_workers) as executor:\n infos = executor.map(process_single_scene, sample_id_list)\n return list(infos)"
},
{
"identifier": "ScanNetSegData",
"path": "tools/data_converter/scannet_data_utils.py",
"snippet": "class ScanNetSegData(object):\n \"\"\"ScanNet dataset used to generate infos for semantic segmentation task.\n\n Args:\n data_root (str): Root path of the raw data.\n ann_file (str): The generated scannet infos.\n split (str, optional): Set split type of the data. Default: 'train'.\n num_points (int, optional): Number of points in each data input.\n Default: 8192.\n label_weight_func (function, optional): Function to compute the\n label weight. Default: None.\n \"\"\"\n\n def __init__(self,\n data_root,\n ann_file,\n split='train',\n num_points=8192,\n label_weight_func=None):\n self.data_root = data_root\n self.data_infos = mmcv.load(ann_file)\n self.split = split\n assert split in ['train', 'val', 'test']\n self.num_points = num_points\n\n self.all_ids = np.arange(41) # all possible ids\n self.cat_ids = np.array([\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,\n 39\n ]) # used for seg task\n self.ignore_index = len(self.cat_ids)\n\n self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \\\n self.ignore_index\n for i, cat_id in enumerate(self.cat_ids):\n self.cat_id2class[cat_id] = i\n\n # label weighting function is taken from\n # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24\n self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \\\n label_weight_func is None else label_weight_func\n\n def get_seg_infos(self):\n if self.split == 'test':\n return\n scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()\n save_folder = osp.join(self.data_root, 'seg_info')\n mmcv.mkdir_or_exist(save_folder)\n np.save(\n osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),\n scene_idxs)\n np.save(\n osp.join(save_folder, f'{self.split}_label_weight.npy'),\n label_weight)\n print(f'{self.split} resampled scene index and label weight saved')\n\n def _convert_to_label(self, mask):\n \"\"\"Convert class_id in loaded segmentation mask to label.\"\"\"\n if isinstance(mask, str):\n if mask.endswith('npy'):\n mask = np.load(mask)\n else:\n mask = np.fromfile(mask, dtype=np.int64)\n label = self.cat_id2class[mask]\n return label\n\n def get_scene_idxs_and_label_weight(self):\n \"\"\"Compute scene_idxs for data sampling and label weight for loss\n calculation.\n\n We sample more times for scenes with more points. Label_weight is\n inversely proportional to number of class points.\n \"\"\"\n num_classes = len(self.cat_ids)\n num_point_all = []\n label_weight = np.zeros((num_classes + 1, )) # ignore_index\n for data_info in self.data_infos:\n label = self._convert_to_label(\n osp.join(self.data_root, data_info['pts_semantic_mask_path']))\n num_point_all.append(label.shape[0])\n class_count, _ = np.histogram(label, range(num_classes + 2))\n label_weight += class_count\n\n # repeat scene_idx for num_scene_point // num_sample_point times\n sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))\n num_iter = int(np.sum(num_point_all) / float(self.num_points))\n scene_idxs = []\n for idx in range(len(self.data_infos)):\n scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))\n scene_idxs = np.array(scene_idxs).astype(np.int32)\n\n # calculate label weight, adopted from PointNet++\n label_weight = label_weight[:-1].astype(np.float32)\n label_weight = label_weight / label_weight.sum()\n label_weight = self.label_weight_func(label_weight).astype(np.float32)\n\n return scene_idxs, label_weight"
},
{
"identifier": "SUNRGBDData",
"path": "tools/data_converter/sunrgbd_data_utils.py",
"snippet": "class SUNRGBDData(object):\n \"\"\"SUNRGBD data.\n\n Generate scannet infos for sunrgbd_converter.\n\n Args:\n root_path (str): Root path of the raw data.\n split (str, optional): Set split type of the data. Default: 'train'.\n use_v1 (bool, optional): Whether to use v1. Default: False.\n \"\"\"\n\n def __init__(self, root_path, split='train', use_v1=False):\n self.root_dir = root_path\n self.split = split\n self.split_dir = osp.join(root_path, 'sunrgbd_trainval')\n self.classes = [\n 'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',\n 'night_stand', 'bookshelf', 'bathtub'\n ]\n self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}\n self.label2cat = {\n label: self.classes[label]\n for label in range(len(self.classes))\n }\n assert split in ['train', 'val', 'test']\n split_file = osp.join(self.split_dir, f'{split}_data_idx.txt')\n mmcv.check_file_exist(split_file)\n self.sample_id_list = map(int, mmcv.list_from_file(split_file))\n self.image_dir = osp.join(self.split_dir, 'image')\n self.calib_dir = osp.join(self.split_dir, 'calib')\n self.depth_dir = osp.join(self.split_dir, 'depth')\n if use_v1:\n self.label_dir = osp.join(self.split_dir, 'label_v1')\n else:\n self.label_dir = osp.join(self.split_dir, 'label')\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def get_image(self, idx):\n img_filename = osp.join(self.image_dir, f'{idx:06d}.jpg')\n return mmcv.imread(img_filename)\n\n def get_image_shape(self, idx):\n image = self.get_image(idx)\n return np.array(image.shape[:2], dtype=np.int32)\n\n def get_depth(self, idx):\n depth_filename = osp.join(self.depth_dir, f'{idx:06d}.mat')\n depth = sio.loadmat(depth_filename)['instance']\n return depth\n\n def get_calibration(self, idx):\n calib_filepath = osp.join(self.calib_dir, f'{idx:06d}.txt')\n lines = [line.rstrip() for line in open(calib_filepath)]\n Rt = np.array([float(x) for x in lines[0].split(' ')])\n Rt = np.reshape(Rt, (3, 3), order='F').astype(np.float32)\n K = np.array([float(x) for x in lines[1].split(' ')])\n K = np.reshape(K, (3, 3), order='F').astype(np.float32)\n return K, Rt\n\n def get_label_objects(self, idx):\n label_filename = osp.join(self.label_dir, f'{idx:06d}.txt')\n lines = [line.rstrip() for line in open(label_filename)]\n objects = [SUNRGBDInstance(line) for line in lines]\n return objects\n\n def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):\n \"\"\"Get data infos.\n\n This method gets information from the raw data.\n\n Args:\n num_workers (int, optional): Number of threads to be used.\n Default: 4.\n has_label (bool, optional): Whether the data has label.\n Default: True.\n sample_id_list (list[int], optional): Index list of the sample.\n Default: None.\n\n Returns:\n infos (list[dict]): Information of the raw data.\n \"\"\"\n\n def process_single_scene(sample_idx):\n print(f'{self.split} sample_idx: {sample_idx}')\n # convert depth to points\n # SAMPLE_NUM = 50000 # we do not down sampling points\n # TODO: Check whether can move the point\n # sampling process during training.\n pc_upright_depth = self.get_depth(sample_idx)\n # pc_upright_depth_subsampled = random_sampling(\n # pc_upright_depth, SAMPLE_NUM)\n pc_upright_depth_subsampled = pc_upright_depth\n\n info = dict()\n pc_info = {'num_features': 6, 'lidar_idx': sample_idx}\n info['point_cloud'] = pc_info\n\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))\n pc_upright_depth_subsampled.tofile(\n osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin'))\n\n 
info['pts_path'] = osp.join('points', f'{sample_idx:06d}.bin')\n \n ##########################superpoint#######################\n info['pts_superpoints_path'] = osp.join('superpoints', f'{sample_idx:06d}.bin')\n ###########################################################\n \n img_path = osp.join('image', f'{sample_idx:06d}.jpg')\n image_info = {\n 'image_idx': sample_idx,\n 'image_shape': self.get_image_shape(sample_idx),\n 'image_path': img_path\n }\n info['image'] = image_info\n\n K, Rt = self.get_calibration(sample_idx)\n calib_info = {'K': K, 'Rt': Rt}\n info['calib'] = calib_info\n\n if has_label:\n obj_list = self.get_label_objects(sample_idx)\n annotations = {}\n annotations['gt_num'] = len([\n obj.classname for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n if annotations['gt_num'] != 0:\n annotations['name'] = np.array([\n obj.classname for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n annotations['bbox'] = np.concatenate([\n obj.box2d.reshape(1, 4) for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ],\n axis=0)\n annotations['location'] = np.concatenate([\n obj.centroid.reshape(1, 3) for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ],\n axis=0)\n annotations['dimensions'] = 2 * np.array([\n [obj.length, obj.width, obj.height] for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ]) # lwh (depth) format\n annotations['rotation_y'] = np.array([\n obj.heading_angle for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n annotations['index'] = np.arange(\n len(obj_list), dtype=np.int32)\n annotations['class'] = np.array([\n self.cat2label[obj.classname] for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n annotations['gt_boxes_upright_depth'] = np.stack(\n [\n obj.box3d for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ],\n axis=0) # (K,8)\n info['annos'] = annotations\n return info\n\n sample_id_list = sample_id_list if \\\n sample_id_list is not None else self.sample_id_list\n with futures.ThreadPoolExecutor(num_workers) as executor:\n infos = executor.map(process_single_scene, sample_id_list)\n return list(infos)"
}
] | import os
import mmcv
import numpy as np
from tools.data_converter.s3dis_data_utils import S3DISData, S3DISSegData
from tools.data_converter.scannet_data_utils import ScanNetData, ScanNetSegData
from tools.data_converter.scannet_md40_data_utils import ScanNetData_md40, ScanNetSegData_md40
from tools.data_converter.sunrgbd_data_utils import SUNRGBDData | 8,142 | # Copyright (c) OpenMMLab. All rights reserved.
def create_indoor_info_file(data_path,
pkl_prefix='sunrgbd',
save_path=None,
use_v1=False,
workers=4):
"""Create indoor information file.
Get information of the raw data and save it to the pkl file.
Args:
data_path (str): Path of the data.
pkl_prefix (str, optional): Prefix of the pkl to be saved.
Default: 'sunrgbd'.
save_path (str, optional): Path of the pkl to be saved. Default: None.
use_v1 (bool, optional): Whether to use v1. Default: False.
workers (int, optional): Number of threads to be used. Default: 4.
"""
assert os.path.exists(data_path)
assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis', 'scannet_md40'], \
f'unsupported indoor dataset {pkl_prefix}'
save_path = data_path if save_path is None else save_path
assert os.path.exists(save_path)
# generate infos for both detection and segmentation task
if pkl_prefix in ['sunrgbd', 'scannet', 'scannet_md40']:
train_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_train.pkl')
val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')
if pkl_prefix == 'sunrgbd':
# SUN RGB-D has a train-val split
| # Copyright (c) OpenMMLab. All rights reserved.
def create_indoor_info_file(data_path,
pkl_prefix='sunrgbd',
save_path=None,
use_v1=False,
workers=4):
"""Create indoor information file.
Get information of the raw data and save it to the pkl file.
Args:
data_path (str): Path of the data.
pkl_prefix (str, optional): Prefix of the pkl to be saved.
Default: 'sunrgbd'.
save_path (str, optional): Path of the pkl to be saved. Default: None.
use_v1 (bool, optional): Whether to use v1. Default: False.
workers (int, optional): Number of threads to be used. Default: 4.
"""
assert os.path.exists(data_path)
assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis', 'scannet_md40'], \
f'unsupported indoor dataset {pkl_prefix}'
save_path = data_path if save_path is None else save_path
assert os.path.exists(save_path)
# generate infos for both detection and segmentation task
if pkl_prefix in ['sunrgbd', 'scannet', 'scannet_md40']:
train_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_train.pkl')
val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')
if pkl_prefix == 'sunrgbd':
# SUN RGB-D has a train-val split | train_dataset = SUNRGBDData( | 4 | 2023-12-21 12:50:35+00:00 | 12k |
jdejaegh/irm-kmi-ha | custom_components/irm_kmi/coordinator.py | [
{
"identifier": "IrmKmiApiClient",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiClient:\n \"\"\"API client for IRM KMI weather data\"\"\"\n COORD_DECIMALS = 6\n\n def __init__(self, session: aiohttp.ClientSession) -> None:\n self._session = session\n self._base_url = \"https://app.meteo.be/services/appv4/\"\n\n async def get_forecasts_coord(self, coord: dict) -> dict:\n \"\"\"Get forecasts for given city.\"\"\"\n assert 'lat' in coord\n assert 'long' in coord\n coord['lat'] = round(coord['lat'], self.COORD_DECIMALS)\n coord['long'] = round(coord['long'], self.COORD_DECIMALS)\n\n response = await self._api_wrapper(params={\"s\": \"getForecasts\", \"k\": _api_key(\"getForecasts\")} | coord)\n return await response.json()\n\n async def get_image(self, url, params: dict | None = None) -> bytes:\n \"\"\"Get the image at the specified url with the parameters\"\"\"\n r: ClientResponse = await self._api_wrapper(base_url=url, params={} if params is None else params)\n return await r.read()\n\n async def _api_wrapper(\n self,\n params: dict,\n base_url: str | None = None,\n path: str = \"\",\n method: str = \"get\",\n data: dict | None = None,\n headers: dict | None = None,\n ) -> any:\n \"\"\"Get information from the API.\"\"\"\n\n try:\n async with async_timeout.timeout(10):\n response = await self._session.request(\n method=method,\n url=f\"{self._base_url if base_url is None else base_url}{path}\",\n headers=headers,\n json=data,\n params=params\n )\n response.raise_for_status()\n return response\n\n except asyncio.TimeoutError as exception:\n raise IrmKmiApiCommunicationError(\"Timeout error fetching information\") from exception\n except (aiohttp.ClientError, socket.gaierror) as exception:\n raise IrmKmiApiCommunicationError(\"Error fetching information\") from exception\n except Exception as exception: # pylint: disable=broad-except\n raise IrmKmiApiError(f\"Something really wrong happened! {exception}\") from exception"
},
{
"identifier": "IrmKmiApiError",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiError(Exception):\n \"\"\"Exception to indicate a general API error.\"\"\""
},
{
"identifier": "CONF_DARK_MODE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_DARK_MODE: Final = \"dark_mode\""
},
{
"identifier": "CONF_STYLE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_STYLE: Final = \"style\""
},
{
"identifier": "DOMAIN",
"path": "custom_components/irm_kmi/const.py",
"snippet": "DOMAIN: Final = 'irm_kmi'"
},
{
"identifier": "IRM_KMI_TO_HA_CONDITION_MAP",
"path": "custom_components/irm_kmi/const.py",
"snippet": "IRM_KMI_TO_HA_CONDITION_MAP: Final = {\n (0, 'd'): ATTR_CONDITION_SUNNY,\n (0, 'n'): ATTR_CONDITION_CLEAR_NIGHT,\n (1, 'd'): ATTR_CONDITION_SUNNY,\n (1, 'n'): ATTR_CONDITION_CLEAR_NIGHT,\n (2, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (2, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (3, 'd'): ATTR_CONDITION_PARTLYCLOUDY,\n (3, 'n'): ATTR_CONDITION_PARTLYCLOUDY,\n (4, 'd'): ATTR_CONDITION_POURING,\n (4, 'n'): ATTR_CONDITION_POURING,\n (5, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (5, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (6, 'd'): ATTR_CONDITION_POURING,\n (6, 'n'): ATTR_CONDITION_POURING,\n (7, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (7, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (8, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (8, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (9, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (9, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (10, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (10, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (11, 'd'): ATTR_CONDITION_SNOWY,\n (11, 'n'): ATTR_CONDITION_SNOWY,\n (12, 'd'): ATTR_CONDITION_SNOWY,\n (12, 'n'): ATTR_CONDITION_SNOWY,\n (13, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (13, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (14, 'd'): ATTR_CONDITION_CLOUDY,\n (14, 'n'): ATTR_CONDITION_CLOUDY,\n (15, 'd'): ATTR_CONDITION_CLOUDY,\n (15, 'n'): ATTR_CONDITION_CLOUDY,\n (16, 'd'): ATTR_CONDITION_POURING,\n (16, 'n'): ATTR_CONDITION_POURING,\n (17, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (17, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (18, 'd'): ATTR_CONDITION_RAINY,\n (18, 'n'): ATTR_CONDITION_RAINY,\n (19, 'd'): ATTR_CONDITION_POURING,\n (19, 'n'): ATTR_CONDITION_POURING,\n (20, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (20, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (21, 'd'): ATTR_CONDITION_EXCEPTIONAL,\n (21, 'n'): ATTR_CONDITION_EXCEPTIONAL,\n (22, 'd'): ATTR_CONDITION_SNOWY,\n (22, 'n'): ATTR_CONDITION_SNOWY,\n (23, 'd'): ATTR_CONDITION_SNOWY,\n (23, 'n'): ATTR_CONDITION_SNOWY,\n (24, 'd'): ATTR_CONDITION_FOG,\n (24, 'n'): ATTR_CONDITION_FOG,\n (25, 'd'): ATTR_CONDITION_FOG,\n (25, 'n'): ATTR_CONDITION_FOG,\n (26, 'd'): ATTR_CONDITION_FOG,\n (26, 'n'): ATTR_CONDITION_FOG,\n (27, 'd'): ATTR_CONDITION_EXCEPTIONAL,\n (27, 'n'): ATTR_CONDITION_EXCEPTIONAL\n}"
},
{
"identifier": "LANGS",
"path": "custom_components/irm_kmi/const.py",
"snippet": "LANGS: Final = ['en', 'fr', 'nl', 'de']"
},
{
"identifier": "MAP_WARNING_ID_TO_SLUG",
"path": "custom_components/irm_kmi/const.py",
"snippet": "MAP_WARNING_ID_TO_SLUG: Final = {\n 0: 'wind',\n 1: 'rain',\n 2: 'ice_or_snow',\n 3: 'thunder',\n 7: 'fog',\n 9: 'cold',\n 12: 'thunder_wind_rain',\n 13: 'thunderstorm_strong_gusts',\n 14: 'thunderstorm_large_rainfall',\n 15: 'storm_surge',\n 17: 'coldspell'}"
},
{
"identifier": "OPTION_STYLE_SATELLITE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OPTION_STYLE_SATELLITE: Final = 'satellite_style'"
},
{
"identifier": "OUT_OF_BENELUX",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OUT_OF_BENELUX: Final = [\"außerhalb der Benelux (Brussels)\",\n \"Hors de Belgique (Bxl)\",\n \"Outside the Benelux (Brussels)\",\n \"Buiten de Benelux (Brussel)\"]"
},
{
"identifier": "STYLE_TO_PARAM_MAP",
"path": "custom_components/irm_kmi/const.py",
"snippet": "STYLE_TO_PARAM_MAP: Final = {\n OPTION_STYLE_STD: 1,\n OPTION_STYLE_CONTRAST: 2,\n OPTION_STYLE_YELLOW_RED: 3,\n OPTION_STYLE_SATELLITE: 4\n}"
},
{
"identifier": "AnimationFrameData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class AnimationFrameData(TypedDict, total=False):\n \"\"\"Holds one single frame of the radar camera, along with the timestamp of the frame\"\"\"\n time: datetime | None\n image: bytes | None\n value: float | None\n position: float | None\n position_higher: float | None\n position_lower: float | None"
},
{
"identifier": "CurrentWeatherData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class CurrentWeatherData(TypedDict, total=False):\n \"\"\"Class to hold the currently observable weather at a given location\"\"\"\n condition: str | None\n temperature: float | None\n wind_speed: float | None\n wind_gust_speed: float | None\n wind_bearing: float | str | None\n uv_index: float | None\n pressure: float | None"
},
{
"identifier": "IrmKmiForecast",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class IrmKmiForecast(Forecast):\n \"\"\"Forecast class with additional attributes for IRM KMI\"\"\"\n\n # TODO: add condition_2 as well and evolution to match data from the API?\n # TODO: remove the _fr and _nl to have only one 'text' attribute\n text_fr: str | None\n text_nl: str | None"
},
{
"identifier": "ProcessedCoordinatorData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class ProcessedCoordinatorData(TypedDict, total=False):\n \"\"\"Data class that will be exposed to the entities consuming data from an IrmKmiCoordinator\"\"\"\n current_weather: CurrentWeatherData\n hourly_forecast: List[Forecast] | None\n daily_forecast: List[IrmKmiForecast] | None\n animation: RadarAnimationData\n warnings: List[WarningData] | None"
},
{
"identifier": "RadarAnimationData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class RadarAnimationData(TypedDict, total=False):\n \"\"\"Holds frames and additional data for the animation to be rendered\"\"\"\n sequence: List[AnimationFrameData] | None\n most_recent_image_idx: int | None\n hint: str | None\n unit: str | None\n location: bytes | None\n svg_still: bytes | None\n svg_animated: bytes | None"
},
{
"identifier": "WarningData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class WarningData(TypedDict, total=False):\n \"\"\"Holds data about a specific warning\"\"\"\n slug: str\n id: int\n level: int\n friendly_name: str\n text: str\n starts_at: datetime\n ends_at: datetime"
},
{
"identifier": "RainGraph",
"path": "custom_components/irm_kmi/rain_graph.py",
"snippet": "class RainGraph:\n def __init__(self,\n animation_data: RadarAnimationData,\n background_image_path: str,\n background_size: (int, int),\n dark_mode: bool = False,\n tz: str = 'UTC',\n svg_width: float = 640,\n inset: float = 20,\n graph_height: float = 150,\n top_text_space: float = 30,\n top_text_y_pos: float = 20,\n bottom_text_space: float = 50,\n bottom_text_y_pos: float = 218,\n auto=True\n ):\n\n self._animation_data: RadarAnimationData = animation_data\n self._background_image_path: str = background_image_path\n self._background_size: (int, int) = background_size\n self._dark_mode: bool = dark_mode\n self._tz = pytz.timezone(tz)\n self._svg_width: float = svg_width\n self._inset: float = inset\n self._graph_height: float = graph_height\n self._top_text_space: float = top_text_space + background_size[1]\n self._top_text_y_pos: float = top_text_y_pos + background_size[1]\n self._bottom_text_space: float = bottom_text_space\n self._bottom_text_y_pos: float = bottom_text_y_pos + background_size[1]\n\n self._frame_count: int = len(self._animation_data['sequence'])\n self._graph_width: float = self._svg_width - 2 * self._inset\n self._graph_bottom: float = self._top_text_space + self._graph_height\n self._svg_height: float = self._graph_height + self._top_text_space + self._bottom_text_space\n self._interval_width: float = self._graph_width / self._frame_count\n self._offset: float = self._inset + self._interval_width / 2\n\n if not (0 <= self._top_text_y_pos <= self._top_text_space):\n raise ValueError(\"It must hold that 0 <= top_text_y_pos <= top_text_space\")\n\n if not (self._graph_bottom <= self._bottom_text_y_pos <= self._graph_bottom + self._bottom_text_space):\n raise ValueError(\"bottom_text_y_pos must be below the graph\")\n\n self._dwg: Drawing = Drawing(size=(self._svg_width, self._svg_height), profile='full')\n self._dwg_save: Drawing\n self._dwg_animated: Drawing\n self._dwg_still: Drawing\n\n if auto:\n self.draw_svg_frame()\n self.draw_hour_bars()\n self.draw_chances_path()\n self.draw_data_line()\n self.write_hint()\n self.insert_background()\n self._dwg_save = copy.deepcopy(self._dwg)\n\n self.draw_current_fame_line()\n self.draw_description_text()\n self.insert_cloud_layer()\n self.draw_location()\n self._dwg_animated = self._dwg\n\n self._dwg = self._dwg_save\n idx = self._animation_data['most_recent_image_idx']\n self.draw_current_fame_line(idx)\n self.draw_description_text(idx)\n self.insert_cloud_layer(idx)\n self.draw_location()\n self._dwg_still = self._dwg\n\n def draw_svg_frame(self):\n \"\"\"Create the global area to draw the other items\"\"\"\n self._dwg.embed_font(name=\"Roboto Medium\", filename='custom_components/irm_kmi/resources/roboto_medium.ttf')\n self._dwg.embed_stylesheet(\"\"\"\n .roboto {\n font-family: \"Roboto Medium\";\n }\n \"\"\")\n\n fill_color = '#393C40' if self._dark_mode else '#385E95'\n self._dwg.add(self._dwg.rect(insert=(0, 0),\n size=(self._svg_width, self._svg_height),\n rx=None, ry=None,\n fill=fill_color, stroke='none'))\n\n def draw_description_text(self, idx: int | None = None):\n \"\"\"For every frame write the amount of precipitation and the time at the top of the graph.\n If idx is set, only do it for the given idx\"\"\"\n\n times = [e['time'].astimezone(tz=self._tz).strftime('%H:%M') for e in\n self._animation_data['sequence']]\n rain_levels = [f\"{e['value']}{self._animation_data['unit']}\" for e in self._animation_data['sequence']]\n\n if idx is not None:\n time = times[idx]\n rain_level = rain_levels[idx]\n\n 
paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n self.write_time_and_rain(paragraph, rain_level, time)\n return\n\n for i in range(self._frame_count):\n time = times[i]\n rain_level = rain_levels[i]\n\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n values = ['hidden'] * self._frame_count\n values[i] = 'visible'\n\n paragraph.add(Animate(\n attributeName=\"visibility\",\n values=\";\".join(values),\n dur=f\"{self._frame_count * 0.3}s\",\n begin=\"0s\",\n repeatCount=\"indefinite\"\n ))\n\n self.write_time_and_rain(paragraph, rain_level, time)\n\n def write_time_and_rain(self, paragraph, rain_level, time):\n \"\"\"Using the paragraph object, write the time and rain level data\"\"\"\n paragraph.add(self._dwg.text(f\"{time}\", insert=(self._offset, self._top_text_y_pos),\n text_anchor=\"start\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n paragraph.add(self._dwg.text(f\"{rain_level}\", insert=(self._svg_width / 2, self._top_text_y_pos),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n def write_hint(self):\n \"\"\"Add the hint text at the bottom of the graph\"\"\"\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n hint = self._animation_data['hint']\n\n paragraph.add(self._dwg.text(f\"{hint}\", insert=(self._svg_width / 2, self._bottom_text_y_pos),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n def draw_chances_path(self):\n \"\"\"Draw the prevision margin area around the main forecast line\"\"\"\n list_lower_points = []\n list_higher_points = []\n\n rain_list: List[AnimationFrameData] = self._animation_data['sequence']\n graph_rect_left = self._offset\n graph_rect_top = self._top_text_space\n\n for i in range(len(rain_list)):\n position_higher = rain_list[i]['position_higher']\n if position_higher is not None:\n list_higher_points.append((graph_rect_left, graph_rect_top + (\n 1.0 - position_higher) * self._graph_height))\n graph_rect_left += self._interval_width\n\n graph_rect_right = graph_rect_left - self._interval_width\n for i in range(len(rain_list) - 1, -1, -1):\n position_lower = rain_list[i]['position_lower']\n if position_lower is not None:\n list_lower_points.append((graph_rect_right, graph_rect_top + (\n 1.0 - position_lower) * self._graph_height))\n graph_rect_right -= self._interval_width\n\n if list_higher_points and list_lower_points:\n self.draw_chance_precip(list_higher_points, list_lower_points)\n\n def draw_chance_precip(self, list_higher_points: List, list_lower_points: List):\n \"\"\"Draw the blue solid line representing the actual rain forecast\"\"\"\n precip_higher_chance_path = self._dwg.path(fill='#63c8fa', stroke='none', opacity=.3)\n\n list_higher_points[-1] = tuple(list(list_higher_points[-1]) + ['last'])\n\n self.set_curved_path(precip_higher_chance_path, list_higher_points + list_lower_points)\n self._dwg.add(precip_higher_chance_path)\n\n @staticmethod\n def set_curved_path(path, points):\n \"\"\"Pushes points on the path by creating a nice curve between them\"\"\"\n if len(points) < 2:\n return\n\n path.push('M', *points[0])\n\n for i in range(1, len(points)):\n x_mid = (points[i - 1][0] + points[i][0]) / 2\n y_mid = (points[i - 1][1] + points[i][1]) / 2\n\n path.push('Q', points[i - 1][0], points[i - 1][1], x_mid, y_mid)\n if points[i][-1] == 'last' or points[i - 1][-1] == 'last':\n path.push('Q', points[i][0], points[i][1], points[i][0], points[i][1])\n\n path.push('Q', points[-1][0], points[-1][1], points[-1][0], 
points[-1][1])\n\n def draw_data_line(self):\n \"\"\"Draw the main data line for the rain forecast\"\"\"\n rain_list: List[AnimationFrameData] = self._animation_data['sequence']\n graph_rect_left = self._offset\n graph_rect_top = self._top_text_space\n\n entry_list = []\n\n for i in range(len(rain_list)):\n position = rain_list[i]['position']\n entry_list.append(\n (graph_rect_left,\n graph_rect_top + (1.0 - position) * self._graph_height))\n graph_rect_left += self._interval_width\n data_line_path = self._dwg.path(fill='none', stroke='#63c8fa', stroke_width=2)\n self.set_curved_path(data_line_path, entry_list)\n self._dwg.add(data_line_path)\n\n def draw_hour_bars(self):\n \"\"\"Draw the small bars at the bottom to represent the time\"\"\"\n hour_bar_height = 8\n horizontal_inset = self._offset\n\n for (i, rain_item) in enumerate(self._animation_data['sequence']):\n time_image = rain_item['time'].astimezone(tz=self._tz)\n is_hour_bar = time_image.minute == 0\n\n x_position = horizontal_inset\n if i == self._animation_data['most_recent_image_idx']:\n self._dwg.add(self._dwg.line(start=(x_position, self._top_text_space),\n end=(x_position, self._graph_bottom),\n stroke='white',\n opacity=0.5,\n stroke_dasharray=4))\n\n self._dwg.add(self._dwg.line(start=(x_position, self._graph_bottom - hour_bar_height),\n end=(x_position, self._graph_bottom),\n stroke='white' if is_hour_bar else 'lightgrey',\n opacity=0.9 if is_hour_bar else 0.7))\n\n if is_hour_bar:\n graph_rect_center_x = x_position\n graph_rect_center_y = self._graph_bottom + 18\n\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n paragraph.add(self._dwg.text(f\"{time_image.hour}h\", insert=(graph_rect_center_x, graph_rect_center_y),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n horizontal_inset += self._interval_width\n\n self._dwg.add(self._dwg.line(start=(self._offset, self._graph_bottom),\n end=(self._graph_width + self._interval_width / 2, self._graph_bottom),\n stroke='white'))\n\n def draw_current_fame_line(self, idx: int | None = None):\n \"\"\"Draw a solid white line on the timeline at the position of the given frame index\"\"\"\n x_position = self._offset if idx is None else self._offset + idx * self._interval_width\n now = self._dwg.add(self._dwg.line(start=(x_position, self._top_text_space),\n end=(x_position, self._graph_bottom),\n id='now',\n stroke='white',\n opacity=1,\n stroke_width=2))\n if idx is not None:\n return\n now.add(self._dwg.animateTransform(\"translate\", \"transform\",\n id=\"now\",\n from_=f\"{self._offset} 0\",\n to=f\"{self._graph_width - self._offset} 0\",\n dur=f\"{self._frame_count * 0.3}s\",\n repeatCount=\"indefinite\"))\n\n def get_svg_string(self, still_image: bool = False) -> bytes:\n return self._dwg_still.tostring().encode() if still_image else self._dwg_animated.tostring().encode()\n\n def insert_background(self):\n with open(self._background_image_path, 'rb') as f:\n png_data = base64.b64encode(f.read()).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n def insert_cloud_layer(self, idx: int | None = None):\n imgs = [e['image'] for e in self._animation_data['sequence']]\n\n if idx is not None:\n img = imgs[idx]\n png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n return\n\n for i, img in enumerate(imgs):\n 
png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n values = ['hidden'] * self._frame_count\n values[i] = 'visible'\n\n image.add(Animate(\n attributeName=\"visibility\",\n values=\";\".join(values),\n dur=f\"{self._frame_count * 0.3}s\",\n begin=\"0s\",\n repeatCount=\"indefinite\"\n ))\n\n def draw_location(self):\n img = self._animation_data['location']\n png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n def get_dwg(self):\n return copy.deepcopy(self._dwg)"
},
{
"identifier": "disable_from_config",
"path": "custom_components/irm_kmi/utils.py",
"snippet": "def disable_from_config(hass: HomeAssistant, config_entry: ConfigEntry):\n modify_from_config(hass, config_entry.entry_id, False)"
},
{
"identifier": "get_config_value",
"path": "custom_components/irm_kmi/utils.py",
"snippet": "def get_config_value(config_entry: ConfigEntry, key: str) -> Any:\n if config_entry.options and key in config_entry.options:\n return config_entry.options[key]\n return config_entry.data[key]"
}
] | import asyncio
import logging
import async_timeout
import pytz
from datetime import datetime, timedelta
from typing import Any, List, Tuple
from homeassistant.components.weather import Forecast
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, CONF_ZONE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import issue_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (DataUpdateCoordinator,
UpdateFailed)
from .api import IrmKmiApiClient, IrmKmiApiError
from .const import CONF_DARK_MODE, CONF_STYLE, DOMAIN
from .const import IRM_KMI_TO_HA_CONDITION_MAP as CDT_MAP
from .const import LANGS
from .const import MAP_WARNING_ID_TO_SLUG as SLUG_MAP
from .const import OPTION_STYLE_SATELLITE, OUT_OF_BENELUX, STYLE_TO_PARAM_MAP
from .data import (AnimationFrameData, CurrentWeatherData, IrmKmiForecast,
ProcessedCoordinatorData, RadarAnimationData, WarningData)
from .rain_graph import RainGraph
from .utils import disable_from_config, get_config_value | 7,235 | """DataUpdateCoordinator for the IRM KMI integration."""
_LOGGER = logging.getLogger(__name__)
class IrmKmiCoordinator(DataUpdateCoordinator):
"""Coordinator to update data from IRM KMI"""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize the coordinator."""
super().__init__(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="IRM KMI weather",
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(minutes=7),
)
self._api_client = IrmKmiApiClient(session=async_get_clientsession(hass))
self._zone = get_config_value(entry, CONF_ZONE)
self._dark_mode = get_config_value(entry, CONF_DARK_MODE)
self._style = get_config_value(entry, CONF_STYLE)
self._config_entry = entry
async def _async_update_data(self) -> ProcessedCoordinatorData:
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
if (zone := self.hass.states.get(self._zone)) is None:
raise UpdateFailed(f"Zone '{self._zone}' not found")
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
api_data = await self._api_client.get_forecasts_coord(
{'lat': zone.attributes[ATTR_LATITUDE],
'long': zone.attributes[ATTR_LONGITUDE]}
)
_LOGGER.debug(f"Observation for {api_data.get('cityName', '')}: {api_data.get('obs', '{}')}")
except IrmKmiApiError as err:
raise UpdateFailed(f"Error communicating with API: {err}")
if api_data.get('cityName', None) in OUT_OF_BENELUX:
# TODO create a repair when this triggers
_LOGGER.info(f"Config state: {self._config_entry.state}")
_LOGGER.error(f"The zone {self._zone} is now out of Benelux and forecast is only available in Benelux."
f"Associated device is now disabled. Move the zone back in Benelux and re-enable to fix "
f"this")
disable_from_config(self.hass, self._config_entry)
issue_registry.async_create_issue(
self.hass,
DOMAIN,
"zone_moved",
is_fixable=True,
severity=issue_registry.IssueSeverity.ERROR,
translation_key='zone_moved',
data={'config_entry_id': self._config_entry.entry_id, 'zone': self._zone},
translation_placeholders={'zone': self._zone}
)
return ProcessedCoordinatorData()
return await self.process_api_data(api_data)
async def async_refresh(self) -> None:
"""Refresh data and log errors."""
await self._async_refresh(log_failures=True, raise_on_entry_error=True)
async def _async_animation_data(self, api_data: dict) -> RadarAnimationData:
"""From the API data passed in, call the API to get all the images and create the radar animation data object.
Frames from the API are merged with the background map and the location marker to create each frame."""
animation_data = api_data.get('animation', {}).get('sequence')
localisation_layer_url = api_data.get('animation', {}).get('localisationLayer')
country = api_data.get('country', '')
if animation_data is None or localisation_layer_url is None or not isinstance(animation_data, list):
return RadarAnimationData()
try:
images_from_api = await self.download_images_from_api(animation_data, country, localisation_layer_url)
except IrmKmiApiError:
_LOGGER.warning(f"Could not get images for weather radar")
return RadarAnimationData()
localisation = images_from_api[0]
images_from_api = images_from_api[1:]
| """DataUpdateCoordinator for the IRM KMI integration."""
_LOGGER = logging.getLogger(__name__)
class IrmKmiCoordinator(DataUpdateCoordinator):
"""Coordinator to update data from IRM KMI"""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize the coordinator."""
super().__init__(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="IRM KMI weather",
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(minutes=7),
)
self._api_client = IrmKmiApiClient(session=async_get_clientsession(hass))
self._zone = get_config_value(entry, CONF_ZONE)
self._dark_mode = get_config_value(entry, CONF_DARK_MODE)
self._style = get_config_value(entry, CONF_STYLE)
self._config_entry = entry
async def _async_update_data(self) -> ProcessedCoordinatorData:
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
if (zone := self.hass.states.get(self._zone)) is None:
raise UpdateFailed(f"Zone '{self._zone}' not found")
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
api_data = await self._api_client.get_forecasts_coord(
{'lat': zone.attributes[ATTR_LATITUDE],
'long': zone.attributes[ATTR_LONGITUDE]}
)
_LOGGER.debug(f"Observation for {api_data.get('cityName', '')}: {api_data.get('obs', '{}')}")
except IrmKmiApiError as err:
raise UpdateFailed(f"Error communicating with API: {err}")
if api_data.get('cityName', None) in OUT_OF_BENELUX:
# TODO create a repair when this triggers
_LOGGER.info(f"Config state: {self._config_entry.state}")
_LOGGER.error(f"The zone {self._zone} is now out of Benelux and forecast is only available in Benelux."
f"Associated device is now disabled. Move the zone back in Benelux and re-enable to fix "
f"this")
disable_from_config(self.hass, self._config_entry)
issue_registry.async_create_issue(
self.hass,
DOMAIN,
"zone_moved",
is_fixable=True,
severity=issue_registry.IssueSeverity.ERROR,
translation_key='zone_moved',
data={'config_entry_id': self._config_entry.entry_id, 'zone': self._zone},
translation_placeholders={'zone': self._zone}
)
return ProcessedCoordinatorData()
return await self.process_api_data(api_data)
async def async_refresh(self) -> None:
"""Refresh data and log errors."""
await self._async_refresh(log_failures=True, raise_on_entry_error=True)
async def _async_animation_data(self, api_data: dict) -> RadarAnimationData:
"""From the API data passed in, call the API to get all the images and create the radar animation data object.
Frames from the API are merged with the background map and the location marker to create each frame."""
animation_data = api_data.get('animation', {}).get('sequence')
localisation_layer_url = api_data.get('animation', {}).get('localisationLayer')
country = api_data.get('country', '')
if animation_data is None or localisation_layer_url is None or not isinstance(animation_data, list):
return RadarAnimationData()
try:
images_from_api = await self.download_images_from_api(animation_data, country, localisation_layer_url)
except IrmKmiApiError:
_LOGGER.warning(f"Could not get images for weather radar")
return RadarAnimationData()
localisation = images_from_api[0]
images_from_api = images_from_api[1:]
| lang = self.hass.config.language if self.hass.config.language in LANGS else 'en' | 6 | 2023-12-17 16:35:01+00:00 | 12k |
v3ucn/Bert-vits2-V2.2 | oldVersion/V111/text/chinese.py | [
{
"identifier": "punctuation",
"path": "oldVersion/V111/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "oldVersion/V111/text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \"鸳鸯\",\n \"高粱\",\n \"骨头\",\n \"骆驼\",\n \"马虎\",\n \"首饰\",\n \"馒头\",\n \"馄饨\",\n \"风筝\",\n \"难为\",\n \"队伍\",\n \"阔气\",\n \"闺女\",\n \"门道\",\n \"锄头\",\n \"铺盖\",\n \"铃铛\",\n \"铁匠\",\n \"钥匙\",\n \"里脊\",\n \"里头\",\n \"部分\",\n \"那么\",\n \"道士\",\n \"造化\",\n \"迷糊\",\n \"连累\",\n \"这么\",\n \"这个\",\n \"运气\",\n \"过去\",\n \"软和\",\n \"转悠\",\n \"踏实\",\n \"跳蚤\",\n \"跟头\",\n \"趔趄\",\n \"财主\",\n \"豆腐\",\n \"讲究\",\n \"记性\",\n \"记号\",\n \"认识\",\n \"规矩\",\n \"见识\",\n \"裁缝\",\n \"补丁\",\n \"衣裳\",\n \"衣服\",\n \"衙门\",\n \"街坊\",\n \"行李\",\n \"行当\",\n \"蛤蟆\",\n \"蘑菇\",\n \"薄荷\",\n \"葫芦\",\n \"葡萄\",\n \"萝卜\",\n \"荸荠\",\n \"苗条\",\n \"苗头\",\n \"苍蝇\",\n \"芝麻\",\n \"舒服\",\n \"舒坦\",\n \"舌头\",\n \"自在\",\n \"膏药\",\n \"脾气\",\n \"脑袋\",\n \"脊梁\",\n \"能耐\",\n \"胳膊\",\n \"胭脂\",\n \"胡萝\",\n \"胡琴\",\n \"胡同\",\n \"聪明\",\n \"耽误\",\n \"耽搁\",\n \"耷拉\",\n \"耳朵\",\n \"老爷\",\n \"老实\",\n \"老婆\",\n \"老头\",\n \"老太\",\n \"翻腾\",\n \"罗嗦\",\n \"罐头\",\n \"编辑\",\n \"结实\",\n \"红火\",\n \"累赘\",\n \"糨糊\",\n \"糊涂\",\n \"精神\",\n \"粮食\",\n \"簸箕\",\n \"篱笆\",\n \"算计\",\n \"算盘\",\n \"答应\",\n \"笤帚\",\n \"笑语\",\n \"笑话\",\n \"窟窿\",\n \"窝囊\",\n \"窗户\",\n \"稳当\",\n \"稀罕\",\n \"称呼\",\n \"秧歌\",\n \"秀气\",\n \"秀才\",\n \"福气\",\n \"祖宗\",\n \"砚台\",\n \"码头\",\n \"石榴\",\n \"石头\",\n \"石匠\",\n \"知识\",\n \"眼睛\",\n \"眯缝\",\n \"眨巴\",\n \"眉毛\",\n \"相声\",\n \"盘算\",\n \"白净\",\n \"痢疾\",\n \"痛快\",\n \"疟疾\",\n \"疙瘩\",\n \"疏忽\",\n \"畜生\",\n \"生意\",\n \"甘蔗\",\n \"琵琶\",\n \"琢磨\",\n \"琉璃\",\n \"玻璃\",\n \"玫瑰\",\n \"玄乎\",\n \"狐狸\",\n \"状元\",\n \"特务\",\n \"牲口\",\n \"牙碜\",\n \"牌楼\",\n \"爽快\",\n \"爱人\",\n \"热闹\",\n \"烧饼\",\n \"烟筒\",\n \"烂糊\",\n \"点心\",\n \"炊帚\",\n \"灯笼\",\n \"火候\",\n \"漂亮\",\n \"滑溜\",\n \"溜达\",\n \"温和\",\n \"清楚\",\n \"消息\",\n \"浪头\",\n \"活泼\",\n \"比方\",\n \"正经\",\n \"欺负\",\n \"模糊\",\n \"槟榔\",\n \"棺材\",\n \"棒槌\",\n \"棉花\",\n \"核桃\",\n \"栅栏\",\n \"柴火\",\n \"架势\",\n \"枕头\",\n \"枇杷\",\n \"机灵\",\n \"本事\",\n \"木头\",\n \"木匠\",\n \"朋友\",\n \"月饼\",\n \"月亮\",\n \"暖和\",\n \"明白\",\n \"时候\",\n \"新鲜\",\n \"故事\",\n \"收拾\",\n \"收成\",\n \"提防\",\n \"挖苦\",\n \"挑剔\",\n \"指甲\",\n \"指头\",\n \"拾掇\",\n \"拳头\",\n \"拨弄\",\n \"招牌\",\n \"招呼\",\n \"抬举\",\n \"护士\",\n \"折腾\",\n \"扫帚\",\n \"打量\",\n \"打算\",\n \"打点\",\n \"打扮\",\n \"打听\",\n \"打发\",\n \"扎实\",\n \"扁担\",\n \"戒指\",\n \"懒得\",\n \"意识\",\n \"意思\",\n \"情形\",\n \"悟性\",\n \"怪物\",\n \"思量\",\n \"怎么\",\n \"念头\",\n \"念叨\",\n \"快活\",\n \"忙活\",\n \"志气\",\n \"心思\",\n \"得罪\",\n \"张罗\",\n \"弟兄\",\n \"开通\",\n \"应酬\",\n \"庄稼\",\n \"干事\",\n \"帮手\",\n \"帐篷\",\n \"希罕\",\n \"师父\",\n \"师傅\",\n \"巴结\",\n \"巴掌\",\n \"差事\",\n \"工夫\",\n \"岁数\",\n \"屁股\",\n \"尾巴\",\n \"少爷\",\n \"小气\",\n \"小伙\",\n \"将就\",\n \"对头\",\n \"对付\",\n \"寡妇\",\n \"家伙\",\n \"客气\",\n \"实在\",\n \"官司\",\n \"学问\",\n \"学生\",\n \"字号\",\n \"嫁妆\",\n \"媳妇\",\n \"媒人\",\n \"婆家\",\n \"娘家\",\n \"委屈\",\n \"姑娘\",\n \"姐夫\",\n \"妯娌\",\n \"妥当\",\n \"妖精\",\n \"奴才\",\n \"女婿\",\n \"头发\",\n \"太阳\",\n \"大爷\",\n \"大方\",\n \"大意\",\n \"大夫\",\n \"多少\",\n \"多么\",\n \"外甥\",\n \"壮实\",\n \"地道\",\n \"地方\",\n \"在乎\",\n \"困难\",\n \"嘴巴\",\n \"嘱咐\",\n \"嘟囔\",\n \"嘀咕\",\n \"喜欢\",\n \"喇嘛\",\n \"喇叭\",\n \"商量\",\n \"唾沫\",\n \"哑巴\",\n \"哈欠\",\n \"哆嗦\",\n \"咳嗽\",\n \"和尚\",\n \"告诉\",\n \"告示\",\n \"含糊\",\n \"吓唬\",\n \"后头\",\n \"名字\",\n \"名堂\",\n \"合同\",\n \"吆喝\",\n \"叫唤\",\n \"口袋\",\n \"厚道\",\n \"厉害\",\n \"千斤\",\n \"包袱\",\n \"包涵\",\n \"匀称\",\n \"勤快\",\n \"动静\",\n \"动弹\",\n \"功夫\",\n \"力气\",\n \"前头\",\n \"刺猬\",\n \"刺激\",\n \"别扭\",\n \"利落\",\n \"利索\",\n \"利害\",\n \"分析\",\n \"出息\",\n \"凑合\",\n \"凉快\",\n \"冷战\",\n \"冤枉\",\n \"冒失\",\n 
\"养活\",\n \"关系\",\n \"先生\",\n \"兄弟\",\n \"便宜\",\n \"使唤\",\n \"佩服\",\n \"作坊\",\n \"体面\",\n \"位置\",\n \"似的\",\n \"伙计\",\n \"休息\",\n \"什么\",\n \"人家\",\n \"亲戚\",\n \"亲家\",\n \"交情\",\n \"云彩\",\n \"事情\",\n \"买卖\",\n \"主意\",\n \"丫头\",\n \"丧气\",\n \"两口\",\n \"东西\",\n \"东家\",\n \"世故\",\n \"不由\",\n \"不在\",\n \"下水\",\n \"下巴\",\n \"上头\",\n \"上司\",\n \"丈夫\",\n \"丈人\",\n \"一辈\",\n \"那个\",\n \"菩萨\",\n \"父亲\",\n \"母亲\",\n \"咕噜\",\n \"邋遢\",\n \"费用\",\n \"冤家\",\n \"甜头\",\n \"介绍\",\n \"荒唐\",\n \"大人\",\n \"泥鳅\",\n \"幸福\",\n \"熟悉\",\n \"计划\",\n \"扑腾\",\n \"蜡烛\",\n \"姥爷\",\n \"照顾\",\n \"喉咙\",\n \"吉他\",\n \"弄堂\",\n \"蚂蚱\",\n \"凤凰\",\n \"拖沓\",\n \"寒碜\",\n \"糟蹋\",\n \"倒腾\",\n \"报复\",\n \"逻辑\",\n \"盘缠\",\n \"喽啰\",\n \"牢骚\",\n \"咖喱\",\n \"扫把\",\n \"惦记\",\n }\n self.must_not_neural_tone_words = {\n \"男子\",\n \"女子\",\n \"分子\",\n \"原子\",\n \"量子\",\n \"莲子\",\n \"石子\",\n \"瓜子\",\n \"电子\",\n \"人人\",\n \"虎虎\",\n }\n self.punc = \":,;。?!“”‘’':,;.?!\"\n\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\n # e.g.\n # word: \"家里\"\n # pos: \"s\"\n # finals: ['ia1', 'i3']\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\n for j, item in enumerate(word):\n if (\n j - 1 >= 0\n and item == word[j - 1]\n and pos[0] in {\"n\", \"v\", \"a\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[j] = finals[j][:-1] + \"5\"\n ge_idx = word.find(\"个\")\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\n finals[-1] = finals[-1][:-1] + \"5\"\n elif len(word) >= 1 and word[-1] in \"的地得\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 走了, 看着, 去过\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\n # finals[-1] = finals[-1][:-1] + \"5\"\n elif (\n len(word) > 1\n and word[-1] in \"们子\"\n and pos in {\"r\", \"n\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 桌上, 地下, 家里\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 上来, 下去\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # 个做量词\n elif (\n ge_idx >= 1\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\n ) or word == \"个\":\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\n else:\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n\n word_list = self._split_word(word)\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n for i, word in enumerate(word_list):\n # conventional neural in Chinese\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\n finals = sum(finals_list, [])\n return finals\n\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # e.g. 看不懂\n if len(word) == 3 and word[1] == \"不\":\n finals[1] = finals[1][:-1] + \"5\"\n else:\n for i, char in enumerate(word):\n # \"不\" before tone4 should be bu2, e.g. 不怕\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n return finals\n\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # \"一\" in number sequences, e.g. 
一零零, 二一零\n if word.find(\"一\") != -1 and all(\n [item.isnumeric() for item in word if item != \"一\"]\n ):\n return finals\n # \"一\" between reduplication words should be yi5, e.g. 看一看\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\n finals[1] = finals[1][:-1] + \"5\"\n # when \"一\" is ordinal word, it should be yi1\n elif word.startswith(\"第一\"):\n finals[1] = finals[1][:-1] + \"1\"\n else:\n for i, char in enumerate(word):\n if char == \"一\" and i + 1 < len(word):\n # \"一\" before tone4 should be yi2, e.g. 一段\n if finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n # \"一\" before non-tone4 should be yi4, e.g. 一天\n else:\n # \"一\" 后面如果是标点,还读一声\n if word[i + 1] not in self.punc:\n finals[i] = finals[i][:-1] + \"4\"\n return finals\n\n def _split_word(self, word: str) -> List[str]:\n word_list = jieba.cut_for_search(word)\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\n first_subword = word_list[0]\n first_begin_idx = word.find(first_subword)\n if first_begin_idx == 0:\n second_subword = word[len(first_subword) :]\n new_word_list = [first_subword, second_subword]\n else:\n second_subword = word[: -len(first_subword)]\n new_word_list = [second_subword, first_subword]\n return new_word_list\n\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\n if len(word) == 2 and self._all_tone_three(finals):\n finals[0] = finals[0][:-1] + \"2\"\n elif len(word) == 3:\n word_list = self._split_word(word)\n if self._all_tone_three(finals):\n # disyllabic + monosyllabic, e.g. 蒙古/包\n if len(word_list[0]) == 2:\n finals[0] = finals[0][:-1] + \"2\"\n finals[1] = finals[1][:-1] + \"2\"\n # monosyllabic + disyllabic, e.g. 纸/老虎\n elif len(word_list[0]) == 1:\n finals[1] = finals[1][:-1] + \"2\"\n else:\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n if len(finals_list) == 2:\n for i, sub in enumerate(finals_list):\n # e.g. 所有/人\n if self._all_tone_three(sub) and len(sub) == 2:\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\n # e.g. 好/喜欢\n elif (\n i == 1\n and not self._all_tone_three(sub)\n and finals_list[i][0][-1] == \"3\"\n and finals_list[0][-1][-1] == \"3\"\n ):\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\n finals = sum(finals_list, [])\n # split idiom into two words who's length is 2\n elif len(word) == 4:\n finals_list = [finals[:2], finals[2:]]\n finals = []\n for sub in finals_list:\n if self._all_tone_three(sub):\n sub[0] = sub[0][:-1] + \"2\"\n finals += sub\n\n return finals\n\n def _all_tone_three(self, finals: List[str]) -> bool:\n return all(x[-1] == \"3\" for x in finals)\n\n # merge \"不\" and the word behind it\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\n def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n last_word = \"\"\n for word, pos in seg:\n if last_word == \"不\":\n word = last_word + word\n if word != \"不\":\n new_seg.append((word, pos))\n last_word = word[:]\n if last_word == \"不\":\n new_seg.append((last_word, \"d\"))\n last_word = \"\"\n return new_seg\n\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. 
\"听\",\"一\",\"听\" ->\"听一听\"\n # function 2: merge single \"一\" and the word behind it\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\n # e.g.\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\n # output seg: [['听一听', 'v']]\n def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n # function 1\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and word == \"一\"\n and i + 1 < len(seg)\n and seg[i - 1][0] == seg[i + 1][0]\n and seg[i - 1][1] == \"v\"\n ):\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\n else:\n if (\n i - 2 >= 0\n and seg[i - 1][0] == \"一\"\n and seg[i - 2][0] == word\n and pos == \"v\"\n ):\n continue\n else:\n new_seg.append([word, pos])\n seg = new_seg\n new_seg = []\n # function 2\n for i, (word, pos) in enumerate(seg):\n if new_seg and new_seg[-1][0] == \"一\":\n new_seg[-1][0] = new_seg[-1][0] + word\n else:\n new_seg.append([word, pos])\n return new_seg\n\n # the first and the second words are all_tone_three\n def _merge_continuous_three_tones(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and self._all_tone_three(sub_finals_list[i - 1])\n and self._all_tone_three(sub_finals_list[i])\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n\n return new_seg\n\n def _is_reduplication(self, word: str) -> bool:\n return len(word) == 2 and word[0] == word[1]\n\n # the last char of first word and the first char of second word is tone_three\n def _merge_continuous_three_tones_2(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and sub_finals_list[i - 1][-1][-1] == \"3\"\n and sub_finals_list[i][0][-1] == \"3\"\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if new_seg and word == new_seg[-1][0]:\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n 
return new_seg\n\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n seg = self._merge_bu(seg)\n try:\n seg = self._merge_yi(seg)\n except:\n print(\"_merge_yi failed\")\n seg = self._merge_reduplication(seg)\n seg = self._merge_continuous_three_tones(seg)\n seg = self._merge_continuous_three_tones_2(seg)\n seg = self._merge_er(seg)\n return seg\n\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\n finals = self._bu_sandhi(word, finals)\n finals = self._yi_sandhi(word, finals)\n finals = self._neural_sandhi(word, pos, finals)\n finals = self._three_sandhi(word, finals)\n return finals"
}
] | import os
import re
import cn2an
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,595 |
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
|
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
| tone_modifier = ToneSandhi() | 1 | 2023-12-18 04:54:46+00:00 | 12k |
d-krupke/CP-SAT-Log-Analyzer | app.py | [
{
"identifier": "LogParser",
"path": "cpsat_log_parser/parser.py",
"snippet": "class LogParser:\n def __init__(self, log: typing.Union[str, typing.List[str]]) -> None:\n self.comments, log_without_comments = self._extract_comments(log)\n self.blocks = self.parse_blocks(log_without_comments)\n\n def parse_blocks(\n self, log: typing.Union[str, typing.List[str]]\n ) -> typing.List[LogBlock]:\n \"\"\"\n Parse a log into its blocks.\n \"\"\"\n blocks = []\n sub_parser = ALL_BLOCKS\n for data in _split_log(log):\n for parser in sub_parser:\n if parser.matches(data):\n blocks.append(parser(data))\n break\n else:\n raise ValueError(f\"Could not parse data: {data}\")\n return blocks\n\n def _extract_comments(\n self, log: typing.Union[str, typing.List[str]]\n ) -> typing.Tuple[typing.List[str], typing.List[str]]:\n \"\"\"\n Extract the comments from a log.\n \"\"\"\n if isinstance(log, str):\n log = log.split(\"\\n\")\n if not isinstance(log, list):\n raise TypeError(\"log must be a list or a string\")\n comments = []\n data = []\n for line in log:\n if line.startswith(\"//\"):\n comments.append(line[2:].strip())\n else:\n data.append(line)\n return comments, data\n\n def get_block_of_type(self, block_type: typing.Type[LogBlock]) -> LogBlock:\n for block in self.blocks:\n if isinstance(block, block_type):\n return block\n raise KeyError(f\"Could not find block '{block_type.__name__}'\")"
},
{
"identifier": "SearchProgressBlock",
"path": "cpsat_log_parser/blocks/search_progress.py",
"snippet": "class SearchProgressBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n lines = [line.strip() for line in lines if line.strip()]\n if not lines:\n raise ValueError(\"No lines to parse\")\n if not self.matches(lines):\n raise ValueError(\"Lines do not match SearchProgressBlock\")\n self.lines = lines\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().lower().startswith(\"Starting search\".lower())\n\n def _parse_events(\n self,\n ) -> typing.List[typing.Union[BoundEvent, ObjEvent, ModelEvent]]:\n \"\"\"\n Parse the log file into a list of BoundEvent and ObjEvent.\n \"\"\"\n events = []\n for line in self.lines:\n obj_event = ObjEvent.parse(line)\n if obj_event:\n events.append(obj_event)\n continue\n bound_event = BoundEvent.parse(line)\n if bound_event:\n events.append(bound_event)\n continue\n model_event = ModelEvent.parse(line)\n if model_event:\n events.append(model_event)\n continue\n return events\n\n def get_presolve_time(self) -> float:\n # first line looks like this \"Starting search at 16.74s with 24 workers.\"\n m = re.match(\n r\"Starting [Ss]earch at (?P<time>\\d+\\.\\d+s) with \\d+ workers.\",\n self.lines[0],\n )\n if m:\n return parse_time(m.group(\"time\"))\n raise ValueError(f\"Could not parse presolve time from '{self.lines[0]}'\")\n\n def get_title(self) -> str:\n return \"Search progress:\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\nThe search progress log is an essential element of the overall log, crucial for identifying performance bottlenecks. It clearly demonstrates the solver's progression over time and pinpoints where it faces significant challenges. It is important to discern whether the upper or lower bounds are causing issues, or if the solver initially finds a near-optimal solution but struggles to minimize a small remaining gap.\n\nThe structure of the log entries is standardized as follows:\n\n`EVENT NAME\\t|\\tTIME\\t|\\tBEST SOLUTION\\t|\\tRANGE OF THE SEARCH\\t|\\tCOMMENT`\n\nFor instance, an event marked `#2` indicates the discovery of the second solution. Here, you will observe an improvement in the `BEST SOLUTION` metric. A notation like `best:16` confirms that the solver has found a solution with a value of 16.\n\nAn event with `#Bound` denotes an enhancement in the bound, as seen by a reduction in the `RANGE OF THE SEARCH`. A detail such as `next:[7,14]` signifies that the solver is now focused on finding a solution valued between 7 and 14.\n\nThe `COMMENT` section provides essential information about the strategies that led to these improvements.\n\nEvents labeled `#Model` signal modifications to the model, such as fixing certain variables.\n\nTo fully grasp the nuances, zooming into the plot is necessary, especially since the initial values can be quite large. 
A thorough examination of which sections of the process converge quickest is crucial for a comprehensive understanding.\n \"\"\"\n\n def gap_as_plotly(self) -> typing.Optional[go.Figure]:\n gap_events = [\n e\n for e in self._parse_events()\n if isinstance(e, BoundEvent) or isinstance(e, ObjEvent)\n ]\n\n def is_valid_gap(gap):\n if gap is None:\n return False\n if not math.isfinite(gap):\n return False\n return True\n\n gaps = [(e.time, e.get_gap()) for e in gap_events if is_valid_gap(e.get_gap())]\n fig = go.Figure()\n if not gap_events:\n return None\n # add gaps\n fig.add_trace(\n go.Scatter(\n x=[t for t, _ in gaps],\n y=[gap for _, gap in gaps],\n mode=\"lines+markers\",\n line=dict(color=\"purple\"),\n name=\"Gap\",\n hovertext=[e.msg for e in gap_events],\n )\n )\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * gaps[-1][0]])\n max_gap = max(gap for _, gap in gaps if gap is not None)\n # make the y-axis start at 0\n fig.update_yaxes(range=[-1, min(300, 1.01 * max_gap)])\n fig.update_layout(\n title=\"Optimality Gap\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Gap (%)\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig\n\n def model_changes_as_plotly(self) -> typing.Optional[go.Figure]:\n \"\"\"\n Plot the model changes in percent over time.\n \"\"\"\n model_events = [e for e in self._parse_events() if isinstance(e, ModelEvent)]\n fig = go.Figure()\n if not model_events:\n return None\n # add number of vars\n fig.add_trace(\n go.Scatter(\n x=[e.time for e in model_events],\n y=[100 * (e.vars_remaining / e.vars) for e in model_events],\n mode=\"lines+markers\",\n line=dict(color=\"green\"),\n name=\"Variables\",\n hovertext=[e.msg for e in model_events],\n )\n )\n # add number of constraints\n fig.add_trace(\n go.Scatter(\n x=[e.time for e in model_events],\n y=[100 * (e.constr_remaining / e.constr) for e in model_events],\n mode=\"lines+markers\",\n line=dict(color=\"orange\"),\n name=\"Constraints\",\n hovertext=[e.msg for e in model_events],\n )\n )\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * model_events[-1].time])\n # make the y-axis range from 0 to 100\n fig.update_yaxes(range=[0, 101])\n fig.update_layout(\n title=\"Model changes\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Remaining (%)\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig\n\n def as_plotly(self) -> typing.Optional[go.Figure]:\n \"\"\"\n Plot the progress of the solver.\n \"\"\"\n events = self._parse_events()\n obj_events = [e for e in events if isinstance(e, ObjEvent)]\n bound_events = [e for e in events if isinstance(e, BoundEvent)]\n fig = go.Figure()\n if not obj_events and not bound_events:\n return None\n max_time = max([e.time for e in bound_events + obj_events])\n\n # make sure that both bounds and objs have a value at max_time\n if obj_events and obj_events[-1].time < max_time:\n if bound_events[-1].obj is None:\n # Should nearly never happen\n obj_events.append(\n ObjEvent(\n time=max_time,\n obj=obj_events[-1].obj,\n bound=bound_events[-1].bound,\n msg=\"\",\n )\n )\n else:\n obj_events.append(\n ObjEvent(\n time=max_time,\n obj=bound_events[-1].obj,\n bound=bound_events[-1].bound,\n msg=\"\",\n )\n )\n if bound_events and bound_events[-1].time < max_time:\n bound_events.append(\n BoundEvent(\n time=max_time,\n obj=obj_events[-1].obj,\n bound=obj_events[-1].bound,\n msg=\"\",\n )\n )\n\n # plot the bounds 
over time. Add the comment as hover text\n fig.add_trace(\n go.Scatter(\n x=[b.time for b in bound_events],\n y=[b.bound for b in bound_events],\n mode=\"lines+markers\",\n line=dict(color=\"cyan\"),\n name=\"Bound\",\n hovertext=[b.msg for b in bound_events],\n )\n )\n\n # plot the objective values over time. Add the comment as hover text\n fig.add_trace(\n go.Scatter(\n x=[o.time for o in obj_events],\n y=[o.obj for o in obj_events],\n mode=\"lines+markers\",\n line=dict(color=\"red\"),\n name=\"Objective\",\n hovertext=[o.msg for o in obj_events],\n )\n )\n\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * max_time])\n fig.update_layout(\n title=\"Search progress\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Objective\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig"
},
{
"identifier": "SearchStatsBlock",
"path": "cpsat_log_parser/blocks/search_stats.py",
"snippet": "class SearchStatsBlock(TableBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n if not lines[0].startswith(\"Search stats\"):\n raise ValueError(f\"Not a valid progress log. First line: {lines[0]}\")\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().startswith(\"Search stats\")\n\n def get_title(self) -> str:\n return \"Search Strategies: Statistics\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This table gives you some statistics on the different search strategies.\n How many variables where in the search space, how many conflicts were found, how many branches were executed, how often was the search restarted, and how often where the boolean and integer propagators applied.\n \"\"\""
},
{
"identifier": "SolutionsBlock",
"path": "cpsat_log_parser/blocks/solutions.py",
"snippet": "class SolutionsBlock(TableBlock):\n \"\"\"\n\n Not available for older versions of CP-SAT.\n \"\"\"\n\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n if not self.matches(lines):\n raise ValueError(f\"Not a valid progress log. First line: {lines[0]}\")\n\n def get_num_solutions(self) -> int:\n # First line looks like this \"Solutions (11) Num Rank\"\n # We want to get the number in the parentheses\n return int(self.lines[0].split(\"(\")[1].split(\")\")[0])\n\n def get_title(self) -> str:\n return \"Solutions\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n Which strategy found the most solutions?\n The rank indicates how good the found solutions are.\n Ranks with `[1,X]` indicate an optimal solution.\n \"\"\"\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n # \"Solutions (11) Num Rank\"\n match = re.match(r\"Solutions\\s+\\(\\d+\\)\\s+Num\\s+Rank\", lines[0])\n return bool(match)"
},
{
"identifier": "TableBlock",
"path": "cpsat_log_parser/blocks/tables.py",
"snippet": "class TableBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n if not lines:\n raise ValueError(\"No lines to parse\")\n self.lines = lines\n self._df = None\n\n def to_pandas(self) -> pd.DataFrame:\n \"\"\"\n Parse the table into a pandas DataFrame.\n \"\"\"\n log = \"\\n\".join((line.strip() for line in self.lines))\n # Replace the single quotes with nothing\n log = log.replace(\"'\", \"\")\n\n # Replace two or more spaces with a single tab\n log = re.sub(\"\\s\\s+\", \"\\t\", log)\n\n # Use StringIO to convert the string to a file-like object for read_csv\n log_file = StringIO(log)\n\n df = pd.read_csv(log_file, delimiter=\"\\t\", index_col=0)\n return df"
},
{
"identifier": "SolverBlock",
"path": "cpsat_log_parser/blocks/solver.py",
"snippet": "class SolverBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n def _parse_parameters(self, line: str) -> typing.Dict:\n \"\"\"\n\n The parameters line can look like this:\n \"Parameters: log_search_progress: true use_timetabling_in_no_overlap_2d: true use_energetic_reasoning_in_no_overlap_2d: true use_pairwise_reasoning_in_no_overlap_2d: true\"\n \"\"\"\n parameters = {}\n line = line[len(\"Parameters:\") :]\n for match in re.finditer(r\"(?P<key>\\w+): (?P<value>[^ ]+)\", line):\n parameters[match.group(\"key\")] = match.group(\"value\")\n return parameters\n\n def get_title(self) -> str:\n return \"Solver Information\"\n\n def get_help(self) -> str:\n return \"\"\"This block contains basic information about the solver.\n As CP-SAT is still under active development and makes serious improvements with every release, it is important to know which version of the solver was used.\n The number of workers, i.e., the level of parallelism, is also important to know.\n CP-SAT is a portfolio solver and the higher the number of workers, the more strategies are used.\n You can find an overview of the different tiers activated by the number of workers in the [CP-SAT documentation](https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers).\n While you should be careful with tinkering with the parameters (they have sensible defaults), it is still good to know which parameters were used.\n All of these information are actually already shown in the overview.\n \"\"\"\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().startswith(\"Starting CP-SAT solver\")\n\n def get_parameters(self) -> typing.Dict:\n for line in self.lines:\n if line.startswith(\"Parameters:\"):\n return self._parse_parameters(line)\n raise ValueError(\"No parameters found\")\n\n def get_number_of_workers(self) -> int:\n # the line looks like this: \"Setting number of workers to 24\"\n for line in self.lines:\n if line.startswith(\"Setting number of workers to\"):\n return int(line.strip().split(\" \")[-1])\n # If `num_search_workers` is set, the number of workers is not shown in the log.\n if \"num_search_workers\" in self.get_parameters():\n return int(self.get_parameters()[\"num_search_workers\"])\n raise ValueError(\"No number of workers found\")\n\n def get_version(self) -> str:\n # the line looks like this: \"Starting CP-SAT solver v9.7.2996\"\n for line in self.lines:\n if line.startswith(\"Starting CP-SAT solver\"):\n return line.strip().split(\" \")[-1]\n raise ValueError(\"No version found\")\n\n def get_parsed_version(self) -> typing.Tuple[int, int, int]:\n # the line looks like this: \"Starting CP-SAT solver v9.7.2996\"\n version = self.get_version()[1:]\n major, minor, patch = version.split(\".\")\n return int(major), int(minor), int(patch)"
},
{
"identifier": "ResponseBlock",
"path": "cpsat_log_parser/blocks/solver_response.py",
"snippet": "class ResponseBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].startswith(\"CpSolverResponse\")\n\n def get_title(self) -> str:\n return \"CpSolverResponse\"\n\n def to_dict(self) -> dict:\n d = {}\n for line in self.lines:\n if line.startswith(\"CpSolverResponse\"):\n continue\n key, value = line.split(\":\")\n key = key.strip()\n value = value.strip()\n if key == \"status\":\n value = value.split(\" \")[0]\n d[key] = value\n return d\n\n def get_gap(self):\n vals = self.to_dict()\n try:\n obj = float(vals[\"objective\"])\n bound = float(vals[\"best_bound\"])\n except TypeError:\n return None\n except ValueError:\n return None\n return 100 * (abs(obj - bound) / max(1, abs(obj)))\n\n def to_pandas(self) -> pd.DataFrame:\n return pd.DataFrame([self.to_dict()])\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This final block of the log contains a summary by the solver.\n Here you find the most important information, such as how successful the search was.\n\n You can find the original documentation [here](https://github.com/google/or-tools/blob/8768ed7a43f8899848effb71295a790f3ecbe2f2/ortools/sat/cp_model.proto#L720).\n \"\"\""
},
{
"identifier": "PresolveLogBlock",
"path": "cpsat_log_parser/blocks/presolve_log.py",
"snippet": "class PresolveLogBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().startswith(\"Starting presolve at\")\n\n def get_title(self) -> str:\n return \"Presolve Log\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This block contains the presolve log.\n It contains information about the presolve steps and the time they took.\n\n There are multiple rounds of domain reduction, expansion, equivalence\n checking, substitution, and probing performed during presolve.\n These steps can take some time, but they can also significantly reduce\n the model size and the search space and thus the time it takes to find\n a solution. Usually, the summary is sufficient to look at to see what happened.\n\n However, you may still want to scroll over the log for messages like\n `removed duplicate constraint`, indicating redundancies (and possibly bugs)\n in you model building.\n \"\"\""
},
{
"identifier": "PresolvedModelBlock",
"path": "cpsat_log_parser/blocks/presolved_model.py",
"snippet": "class PresolvedModelBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n if re.match(r\"Presolved (satisfaction|optimization) model\", lines[0]):\n return True\n return False\n\n def get_title(self) -> str:\n return \"Presolved Model\"\n\n def get_model_fingerprint(self) -> str:\n return self.lines[0].split(\"model_fingerprint: \")[1].strip(\")\")\n\n def get_num_variables(self) -> int:\n return int(\n self.lines[1]\n .split(\"#Variables: \")[1]\n .strip()\n .split(\" \")[0]\n .replace(\"'\", \"\")\n )\n\n def get_num_constraints(self) -> int:\n n = 0\n for line in self.lines:\n if line.startswith(\"#k\"):\n # \"#kNoOverlap2D: 1 (#rectangles: 24)\"\n # \"#kInterval: 48\"\n n += int(line.split(\":\")[1].strip().split(\" \")[0].replace(\"'\", \"\"))\n return n\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This is the most important block of the presolve phase and gives an overview of the model after presolve.\n It contains the number of variables and constraints, as well as coefficients and domains.\n\n `- 200 in [0,199]` will indicate that there are 200 variables with domain `[0,199]`, i.e., values between 0 and 199.\n\n `#kLinearN: 3'000 (#terms: 980'948)` indicates that there are 3000 linear constraints with 980'948 coefficients.\n\n It is useful to compare this to the initial model, to see if your\n model was simplified by presolve, which indicates that you can\n simplify your model yourself, saving presolve time. If you notice that a\n lot of time is spent in presolve but it does not simplify your model,\n you can try to disable/reduce presolve.\n\n It is also interesting to see if the presolve replaced some of your\n constraints with more efficient ones.\n \"\"\""
},
{
"identifier": "TaskTimingBlock",
"path": "cpsat_log_parser/blocks/task_timing.py",
"snippet": "class TaskTimingBlock(LogBlock):\n def __init__(self, lines: List[str]) -> None:\n super().__init__(lines)\n if not self.matches(lines):\n raise ValueError(\"Invalid lines for TaskTimingBlock\")\n\n @staticmethod\n def matches(lines: List[str]) -> bool:\n if not lines:\n return False\n return lines[0].startswith(\"Task timing\")\n\n def get_help(self) -> typing.Optional[str]:\n return \"The time spent on each subsolver. Does not give much useful information for the common user.\"\n\n def get_title(self) -> str:\n return \"Task Timing\"\n\n def to_pandas(self, deterministic: bool) -> pd.DataFrame:\n lines = [line.strip() for line in self.lines if line.strip()]\n lines = [line.replace(\"'\", \"\") for line in lines]\n lines = [line.replace(\"[\", \" \") for line in lines]\n lines = [line.replace(\"]\", \" \") for line in lines]\n lines = [line.replace(\",\", \" \") for line in lines]\n lines = [line.replace(\"\\t\", \" \") for line in lines]\n lines = [line.replace(\"s \", \"s \") for line in lines]\n lines = [re.sub(\"\\s\\s+\", \"\\t\", line) for line in lines]\n\n def filter(line):\n split_line = line.split(\"\\t\")\n n = len(split_line)\n if deterministic:\n return \"\\t\".join(split_line[:1] + split_line[n // 2 + 1 :])\n else:\n return \"\\t\".join(split_line[: n // 2 + 1])\n\n lines = [filter(line) for line in lines]\n if deterministic:\n lines[0] = lines[0].replace(\"Task timing\", \"Task timing (deterministic)\")\n\n # Replace two or more spaces with a single tab\n log = \"\\n\".join(lines)\n log = re.sub(\"\\s\\s+\", \"\\t\", log)\n\n # Use StringIO to convert the string to a file-like object for read_csv\n log_file = StringIO(log)\n\n df = pd.read_csv(log_file, delimiter=\"\\t\", index_col=0)\n return df"
},
{
"identifier": "input_log",
"path": "_app/input_log.py",
"snippet": "def input_log():\n # accept log via file upload or text input\n data = None\n log_file = st.file_uploader(\"Upload a log file\", type=\"txt\")\n if log_file is not None:\n data = log_file.read().decode(\"utf-8\")\n else:\n log_text = st.text_area(\"Or paste a log here\")\n if log_text:\n data = log_text\n url = st.text_input(\"Or load a log from a URL:\", value=\"\")\n if url:\n data = get_data_from_url(url)\n # example logs per button\n st.markdown(\"Or use one of the following example logs:\")\n examples = [\n {\n \"file\": \"example_logs/98_02.txt\",\n \"origin\": \"This log originates from a TSP with MTZ constraints. It is not solved to optimality.\",\n },\n {\n \"file\": \"example_logs/98_03.txt\",\n \"origin\": \"This log originates from a TSP with AddCircuit constraint. It only has a single, but expensive, constraint.\",\n },\n {\n \"file\": \"example_logs/98_04.txt\",\n \"origin\": \"This log originates from a Multi-Knapsack problem.\",\n },\n {\n \"file\": \"example_logs/98_05.txt\",\n \"origin\": \"This log originates from a Packing problem.\",\n },\n {\n \"file\": \"example_logs/98_06.txt\",\n \"origin\": \"This log originates from a Packing problem.\",\n },\n {\n \"file\": \"example_logs/98_07.txt\",\n \"origin\": \"This log originates from a Knapsack problem run on an old Macbook. It spends most of the time in presolve.\",\n },\n {\n \"file\": \"example_logs/98_08.txt\",\n \"origin\": \"An example from an iteration of SampLNS\",\n },\n {\n \"file\": \"example_logs/97_01.txt\",\n \"origin\": \"This was an example log flying around on my computer for teaching purposes.\",\n },\n ]\n # at most 5 examples per row\n row_length = 4\n for i in range(0, len(examples), row_length):\n cols = st.columns(min(len(examples) - i, row_length))\n for j, example in enumerate(examples[i : i + row_length]):\n if cols[j].button(f\"Example {i+j+1}\", help=example.get(\"origin\", None)):\n with open(example[\"file\"]) as f:\n data = f.read()\n\n if not data and \"from_url\" in st.query_params:\n url = st.query_params.get_all(\"from_url\")[0]\n data = get_data_from_url(url)\n if not data and \"example\" in st.query_params:\n example = st.query_params.get_all(\"example\")[0]\n import urllib.request\n import urllib.parse\n\n url = \"https://cpsat-log-analyzer.streamlit.app/?\" + urllib.parse.urlencode(\n {\"example\": example}\n )\n st.info(\n f\"Loading example log `{example}`. You can share it with others using [{url}]({url}).\"\n )\n if \"/\" in example:\n st.error(f\"Invalid example log `{example}`.\")\n return None\n example_path = f\"example_logs/{example}.txt\"\n if not os.path.dirname(example_path).endswith(\"example_logs\"):\n st.error(f\"Invalid example log `{example}`.\")\n return None\n if not os.path.exists(example_path):\n st.error(f\"Example log `{example}` does not exist.\")\n return None\n with open(f\"example_logs/{example}.txt\") as f:\n data = f.read()\n return data"
},
{
"identifier": "print_header",
"path": "_app/header.py",
"snippet": "def print_header():\n st.title(\"CP-SAT Log Analyzer\")\n st.markdown(\n \"Dive into the world of constraint programming with ease using our CP-SAT Log Analyzer. This tool transforms the dense and detailed logs of CP-SAT into clear, readable formats, complemented by intuitive visualizations of key metrics. Whether you're tuning your model or exploring data, our analyzer simplifies and enlightens your journey with CP-SAT. Let us make complex logs simple and actionable!\"\n )\n\n st.markdown(\n \"[](https://github.com/d-krupke/CP-SAT-Log-Analyzer) Feel free to open issues or contribute.\"\n )\n st.markdown(\n \"[](https://github.com/d-krupke/cpsat-primer) This project is a sibling of the CP-SAT Primer.\"\n )\n\n st.header(\"Log File\")\n st.markdown(\n \"\"\"\n To begin analyzing with CP-SAT Log Analyzer, please upload your log file. If you haven't already, you can generate a log file by enabling the log output. Simply set the `log_search_progress` parameter to `True` in your CP-SAT solver configuration. Once this is done, you'll have a detailed log ready for upload and analysis.\n\n The log usually starts as follows:\n ```\n Starting CP-SAT solver v9.7.2996\n Parameters: log_search_progress: true\n Setting number of workers to 24\n\n ...\n ```\n\n Only complete and properly formatted logs are supported for now.\n \"\"\"\n )"
},
{
"identifier": "show_overview",
"path": "_app/overview.py",
"snippet": "def show_overview(parser):\n st.subheader(\"Overview\", divider=True)\n if parser.comments:\n with st.chat_message(\"user\"):\n # escape markdown to prevent XSS\n comment = \"\\n\".join(parser.comments)\n comment = comment.replace(\"\\\\\", \"\")\n comment = comment.replace(\"[\", \"\\\\[*\")\n comment = comment.replace(\"]\", \"*\\\\]\")\n st.write(comment)\n try:\n solver_block = parser.get_block_of_type(SolverBlock)\n initial_model_block = parser.get_block_of_type(InitialModelBlock)\n search_progress_block = parser.get_block_of_type(SearchProgressBlock)\n response_block = parser.get_block_of_type(ResponseBlock)\n col1, col2 = st.columns(2)\n major, minor, patch = solver_block.get_parsed_version()\n if major < 9 or (major == 9 and minor < 8):\n col1.metric(\n label=\"CP-SAT Version\",\n value=solver_block.get_version(),\n help=\"CP-SAT has seen significant performance improvements over the last years. Make sure to use the latest version.\",\n delta=\"outdated\",\n delta_color=\"inverse\",\n )\n else:\n col1.metric(\n label=\"CP-SAT Version\",\n value=solver_block.get_version(),\n help=\"CP-SAT has seen significant performance improvements over the last years. Make sure to use the latest version.\",\n )\n col2.metric(\n label=\"Number of workers\",\n value=solver_block.get_number_of_workers(),\n help=\"CP-SAT has different parallelization tiers, triggered by the number of workers. More workers can improve performance. Fine more information [here](https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers)\",\n )\n # https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers\n\n # print all parameters (key: value)\n if solver_block.get_parameters():\n md = \"*CP-SAT was setup with the following parameters:*\\n\"\n st.markdown(md)\n st.json(solver_block.get_parameters())\n st.markdown(\n \"*You can find more information about the parameters [here](https://github.com/google/or-tools/blob/stable/ortools/sat/sat_parameters.proto).*\"\n )\n\n col1, col2, col3 = st.columns(3)\n response = response_block.to_dict()\n\n col1.metric(\n label=\"Status\",\n value=response[\"status\"],\n help=\"\"\"\n CP-SAT can have 5 different statuses:\n - `UNKNOWN`: The solver timed out before finding a solution or proving infeasibility.\n - `OPTIMAL`: The solver found an optimal solution. This is the best possible status.\n - `FEASIBLE`: The solver found a feasible solution, but it is not guaranteed to be optimal.\n - `INFEASIBLE`: The solver proved that the problem is infeasible. This often indicates a bug in the model.\n - `MODEL_INVALID`: Definitely a bug. Should rarely happen.\n \"\"\",\n )\n col2.metric(\n label=\"Time\",\n value=f\"{float(response['walltime']):.3f}s\",\n help=\"The total time spent by the solver. This includes the time spent in presolve and the time spent in the search.\",\n )\n col3.metric(\n label=\"Presolve\",\n value=f\"{search_progress_block.get_presolve_time():.3f}s\",\n help=\"The time spent in presolve. This is usually a small fraction of the total time.\",\n )\n\n col1, col2, col3 = st.columns(3)\n col1.metric(\n label=\"Variables\",\n value=initial_model_block.get_num_variables(),\n help=\"CP-SAT can handle (hundreds of) thousands of variables. This just gives a rough estimate of the size of the problem. Check *Initial Optimization Model* for more information. 
Many variables may also be removed during presolve, check *Presolve Summary*.\",\n )\n col2.metric(\n label=\"Constraints\",\n value=initial_model_block.get_num_constraints(),\n help=\"CP-SAT can handle (hundreds of) thousands of constraints. More important than the number is the type of constraints. Some constraints are more expensive than others. Check *Initial Optimization Model* for more information.\",\n )\n col3.metric(\n label=\"Type\",\n value=\"Optimization\"\n if initial_model_block.is_optimization()\n else \"Satisfaction\",\n help=\"Is the model an optimization or satisfaction model?\",\n )\n # col3.metric(\"Model Fingerprint\", value=initial_model_block.get_model_fingerprint())\n\n col1, col2, col3 = st.columns(3)\n try:\n obj = float(response[\"objective\"])\n except ValueError:\n obj = None\n col1.metric(\n label=\"Objective\",\n value=obj,\n help=\"Value of the best solution found.\",\n )\n try:\n bound = float(response[\"best_bound\"])\n except ValueError:\n bound = None\n col2.metric(\n label=\"Best bound\",\n value=bound,\n help=\"Bound on how good the best solution can be. If it matches the objective, the solution is optimal.\",\n )\n gap = response_block.get_gap()\n gap_help = \"The gap is the difference between the objective and the best bound. The smaller the better. A gap of 0% means that the solution is optimal.\"\n if gap is None:\n col3.metric(label=\"Gap\", value=None, help=gap_help)\n else:\n col3.metric(label=\"Gap\", value=f\"{gap:.2f}%\", help=gap_help)\n if response[\"status\"] == \"OPTIMAL\" and gap > 0:\n st.error(\n \"CP-SAT returned the status `OPTIMAL`, but does not have a matching bound. This indicates a bug.\"\n )\n\n if (\n response[\"status\"] in (\"OPTIMAL\", \"FEASIBLE\")\n and initial_model_block.is_optimization()\n ):\n fig = search_progress_block.as_plotly()\n if fig:\n st.plotly_chart(fig, use_container_width=True)\n except KeyError as ke:\n st.error(\n f\"Error parsing information. Log seems to be incomplete: {ke}. Make sure you enter the full log without any modifications. The parser is sensitive to new lines.\"\n )"
}
] | import streamlit as st
from cpsat_log_parser import LogParser
from cpsat_log_parser.blocks import (
SearchProgressBlock,
SearchStatsBlock,
SolutionsBlock,
TableBlock,
SolverBlock,
ResponseBlock,
PresolveLogBlock,
TaskTimingBlock,
PresolvedModelBlock,
)
from _app import print_header, input_log, show_overview | 9,886 | """
This file is the main entry point for the Streamlit app.
Further parts of the app are in the `_app` folder.
The logic for parsing the log is in the `cpsat_log_parser` folder.
"""
print_header()
data = input_log()
if data:
st.header("Log Analysis")
st.warning(
"This is just a prototype and may crash or show wrong results. Please report any issues [here](https://github.com/d-krupke/CP-SAT-Log-Analyzer). I welcome any feedback and complex logs to test this on."
)
parser = LogParser(data)
show_overview(parser)
st.markdown("*You can expand the following block to see the raw log.*")
with st.expander("Raw Log"):
st.text(data)
st.markdown(
"*The following part contains a parsed version of the log, easier for analysis. Depending on the CP-SAT version, not all parts may be parsed properly.*"
)
for block in parser.blocks:
try:
if isinstance(block, SearchProgressBlock):
st.subheader("Search", divider=True)
with st.expander(block.get_title(), expanded=True):
if block.get_help():
st.info(block.get_help())
st.text(str(block))
fig = block.as_plotly()
if fig:
st.plotly_chart(fig, use_container_width=True)
st.info(
"This plot shows you how the quality of the solution (objective), and the proved quality (bound) converge over time. It allows you to estimate if finding good solutions or proving optimality is the bottleneck."
)
fig_3 = block.gap_as_plotly()
if fig_3:
st.plotly_chart(fig_3, use_container_width=True)
st.info(
"This plot shows you how the gap between the objective and the bound changes over time. If it quickly reaches a small value but then does not improve for a long time, you could set the `relative_gap_limit` parameter to allow to stop the search as soon as a specific solution quality is reached."
)
fig_2 = block.model_changes_as_plotly()
if fig_2:
st.plotly_chart(fig_2, use_container_width=True)
st.info(
"This plot shows you how the size of the model changes over time."
)
st.subheader("Statistics", divider=True)
st.info(
"This part contains detailed statistics about the search. Only a few elements are useful for the common user."
)
elif isinstance(block, SolverBlock):
st.subheader("Initialization", divider=True)
st.info(
"This block contains some basic information about the solver and the model. For example, you can check how large the model is which parameters were changed."
)
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
st.text(str(block))
elif isinstance(block, SearchStatsBlock):
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
df = block.to_pandas()
st.dataframe(
df,
column_config={
"Restarts": st.column_config.NumberColumn(
help="Restarting the search once we learned about the importance of variables can significantly reduce the size of the search tree."
),
},
)
elif isinstance(block, TaskTimingBlock):
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
tab1, tab2 = st.tabs(["Table", "Raw"])
df_1 = block.to_pandas(deterministic=False)
tab1.dataframe(df_1, use_container_width=True)
df_2 = block.to_pandas(deterministic=True)
tab1.dataframe(df_2, use_container_width=True)
tab2.text(str(block))
elif isinstance(block, SolutionsBlock):
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
st.markdown(f"Number of solutions: {block.get_num_solutions()}")
df = block.to_pandas()
st.dataframe(df, use_container_width=True)
elif isinstance(block, PresolvedModelBlock):
with st.expander(block.get_title(), expanded=True):
if block.get_help():
st.info(block.get_help())
st.text(str(block))
elif isinstance(block, ResponseBlock):
st.subheader("Summary", divider=True)
with st.expander(block.get_title(), expanded=True):
if block.get_help():
st.info(block.get_help())
df = block.to_pandas()
st.dataframe(df.transpose(), use_container_width=True)
| """
This file is the main entry point for the Streamlit app.
Further parts of the app are in the `_app` folder.
The logic for parsing the log is in the `cpsat_log_parser` folder.
"""
print_header()
data = input_log()
if data:
st.header("Log Analysis")
st.warning(
"This is just a prototype and may crash or show wrong results. Please report any issues [here](https://github.com/d-krupke/CP-SAT-Log-Analyzer). I welcome any feedback and complex logs to test this on."
)
parser = LogParser(data)
show_overview(parser)
st.markdown("*You can expand the following block to see the raw log.*")
with st.expander("Raw Log"):
st.text(data)
st.markdown(
"*The following part contains a parsed version of the log, easier for analysis. Depending on the CP-SAT version, not all parts may be parsed properly.*"
)
for block in parser.blocks:
try:
if isinstance(block, SearchProgressBlock):
st.subheader("Search", divider=True)
with st.expander(block.get_title(), expanded=True):
if block.get_help():
st.info(block.get_help())
st.text(str(block))
fig = block.as_plotly()
if fig:
st.plotly_chart(fig, use_container_width=True)
st.info(
"This plot shows you how the quality of the solution (objective), and the proved quality (bound) converge over time. It allows you to estimate if finding good solutions or proving optimality is the bottleneck."
)
fig_3 = block.gap_as_plotly()
if fig_3:
st.plotly_chart(fig_3, use_container_width=True)
st.info(
"This plot shows you how the gap between the objective and the bound changes over time. If it quickly reaches a small value but then does not improve for a long time, you could set the `relative_gap_limit` parameter to allow to stop the search as soon as a specific solution quality is reached."
)
fig_2 = block.model_changes_as_plotly()
if fig_2:
st.plotly_chart(fig_2, use_container_width=True)
st.info(
"This plot shows you how the size of the model changes over time."
)
st.subheader("Statistics", divider=True)
st.info(
"This part contains detailed statistics about the search. Only a few elements are useful for the common user."
)
elif isinstance(block, SolverBlock):
st.subheader("Initialization", divider=True)
st.info(
"This block contains some basic information about the solver and the model. For example, you can check how large the model is which parameters were changed."
)
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
st.text(str(block))
elif isinstance(block, SearchStatsBlock):
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
df = block.to_pandas()
st.dataframe(
df,
column_config={
"Restarts": st.column_config.NumberColumn(
help="Restarting the search once we learned about the importance of variables can significantly reduce the size of the search tree."
),
},
)
elif isinstance(block, TaskTimingBlock):
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
tab1, tab2 = st.tabs(["Table", "Raw"])
df_1 = block.to_pandas(deterministic=False)
tab1.dataframe(df_1, use_container_width=True)
df_2 = block.to_pandas(deterministic=True)
tab1.dataframe(df_2, use_container_width=True)
tab2.text(str(block))
elif isinstance(block, SolutionsBlock):
with st.expander(block.get_title()):
if block.get_help():
st.info(block.get_help())
st.markdown(f"Number of solutions: {block.get_num_solutions()}")
df = block.to_pandas()
st.dataframe(df, use_container_width=True)
elif isinstance(block, PresolvedModelBlock):
with st.expander(block.get_title(), expanded=True):
if block.get_help():
st.info(block.get_help())
st.text(str(block))
elif isinstance(block, ResponseBlock):
st.subheader("Summary", divider=True)
with st.expander(block.get_title(), expanded=True):
if block.get_help():
st.info(block.get_help())
df = block.to_pandas()
st.dataframe(df.transpose(), use_container_width=True) | elif isinstance(block, PresolveLogBlock): | 7 | 2023-12-18 09:18:19+00:00 | 12k |
MMC-K/multimodal_generation_downstream_tasks | testing_veldt5_accelerate_bg.py | [
{
"identifier": "DatasetForVLAlign",
"path": "data_utils.py",
"snippet": "class DatasetForVLAlign(Dataset):\n def __init__(\n self,\n file_path: str,\n image_tokenizer: ViTFeatureExtractor,\n text_tokenizer: AutoTokenizer,\n image_root_dir=None,\n text_max_length=512,\n ):\n super().__init__()\n self.file_path = file_path\n self.image_tokenizer = image_tokenizer\n self.text_tokenizer = text_tokenizer\n self.image_root_dir=image_root_dir\n self.text_max_length = text_max_length\n\n logger.info(\"loading dataset...\")\n self.data = json.load(open(file_path, \"r\"))\n logger.info(\"{} examples was loaded.\".format(len(self.data)))\n\n def __getitem__(self, index):\n sample = self.data[index]\n\n path = sample[\"path\"]\n if self.image_root_dir is not None:\n path = os.path.join(self.image_root_dir, path)\n \n description = sample[\"description\"]\n\n image = Image.open(path)\n\n image_feature = self.image_tokenizer(images=image, return_tensors=\"pt\")\n text_feature = self.text_tokenizer(description, return_tensors=\"pt\", truncation=True, max_length=self.text_max_length)\n\n return {\n \"pixel_values\": image_feature[\"pixel_values\"],\n \"input_ids\": text_feature[\"input_ids\"],\n \"attention_mask\": text_feature[\"attention_mask\"],\n }\n\n def __len__(self):\n return len(self.data)\n\n def get_collate_fn(self):\n def collate_fn(samples, pad_id=0):\n if len(samples) == 0:\n return {}\n return {\n \"input_ids\": collate_tokens([s[\"input_ids\"] for s in samples], pad_id),\n \"attention_mask\": collate_tokens([s[\"attention_mask\"] for s in samples], 0),\n \"pixel_values\": default_collate([s[\"pixel_values\"][0] for s in samples])\n }\n return functools.partial(collate_fn, pad_id=self.text_tokenizer.pad_token_id)"
},
{
"identifier": "VELDT5Model",
"path": "modeling_veldt5.py",
"snippet": "class VELDT5Model(PreTrainedModel):\n r\"\"\"\n [`VELDT5Model`] is a generic model class that will be instantiated as a transformer architecture with\n one of the base vision model classes of the library as encoder and another one as decoder when created with the\n :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and\n :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.\n \"\"\"\n config_class = VELDT5Config\n base_model_prefix = \"veldt5\"\n main_input_name = \"pixel_values\"\n supports_gradient_checkpointing = True\n\n def __init__(\n self,\n config: Optional[PretrainedConfig] = None,\n encoder: Optional[PreTrainedModel] = None,\n decoder: Optional[PreTrainedModel] = None,\n ):\n if config is None and (encoder is None or decoder is None):\n raise ValueError(\"Either a configuration or an encoder and a decoder has to be provided.\")\n if config is None:\n config = VELDT5Config.from_encoder_decoder_configs(encoder.config, decoder.config)\n else:\n if not isinstance(config, self.config_class):\n raise ValueError(f\"Config: {config} has to be of type {self.config_class}\")\n\n if config.decoder.cross_attention_hidden_size is not None:\n if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:\n raise ValueError(\n \"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal\"\n f\" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for\"\n f\" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for\"\n \" `config.encoder.hidden_size`.\"\n )\n\n # initialize with config\n # make sure input & output embeddings is not tied\n config.tie_word_embeddings = False\n super().__init__(config)\n\n if encoder is None:\n encoder = ViTModel(config.encoder, add_pooling_layer=False)\n\n if decoder is None:\n decoder = T5DualDecoderDoubleHeadsModel(config.decoder)\n\n self.encoder = encoder\n self.decoder = decoder\n\n if self.encoder.config.to_dict() != self.config.encoder.to_dict():\n logger.warning(\n f\"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:\"\n f\" {self.config.encoder}\"\n )\n if self.decoder.config.to_dict() != self.config.decoder.to_dict():\n logger.warning(\n f\"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:\"\n f\" {self.config.decoder}\"\n )\n\n # make sure that the individual model's config refers to the shared config\n # so that the updates to the config will be synced\n self.encoder.config = self.config.encoder\n self.decoder.config = self.config.decoder\n\n # encoder outputs might need to be projected to different dimension for decoder\n if (\n self.encoder.config.hidden_size != self.decoder.config.hidden_size\n and self.decoder.config.cross_attention_hidden_size is None\n ):\n self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)\n\n if self.encoder.get_output_embeddings() is not None:\n raise ValueError(\n f\"The encoder {self.encoder} should not have a LM Head. 
Please use a model without LM Head\"\n )\n \n\n pooling_config = copy.deepcopy(self.encoder.config)\n pooling_config.summary_type = \"attn\"\n self.global_pooling = SequenceSummary(pooling_config, num_queries=self.config.num_queries_global)\n self.local_pooling = SequenceSummary(pooling_config, num_queries=self.config.num_queries_local)\n\n\n def _set_gradient_checkpointing(self, module, value=False):\n # call both encoder and decoder function on gradient checkpointing\n self.encoder._set_gradient_checkpointing(module, value=value)\n self.decoder._set_gradient_checkpointing(module, value=value)\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def get_output_embeddings(self):\n return self.decoder.get_output_embeddings()\n\n def set_output_embeddings(self, new_embeddings):\n return self.decoder.set_output_embeddings(new_embeddings)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n # At the moment fast initialization is not supported for composite models\n if kwargs.get(\"_fast_init\", False):\n logger.warning(\n \"Fast initialization is currently not supported for VELDT5Model. \"\n \"Falling back to slow initialization...\"\n )\n kwargs[\"_fast_init\"] = False\n return super().from_pretrained(*args, **kwargs)\n\n @classmethod\n def from_encoder_decoder_pretrained(\n cls,\n encoder_pretrained_model_name_or_path: str = None,\n decoder_pretrained_model_name_or_path: str = None,\n *model_args,\n **kwargs\n ) -> PreTrainedModel:\n r\"\"\"\n Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model\n checkpoints.\n\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train\n the model, you need to first set it back in training mode with `model.train()`.\n\n Params:\n encoder_pretrained_model_name_or_path (`str`, *optional*):\n Information necessary to initiate the image encoder. Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An\n example is `google/vit-base-patch16-224-in21k`.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n this case, `from_tf` should be set to `True` and a configuration object should be provided as\n `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a\n PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):\n Information necessary to initiate the text decoder. Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a\n user or organization name, like `dbmdz/bert-base-german-cased`.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n this case, `from_tf` should be set to `True` and a configuration object should be provided as\n `config` argument. 
This loading path is slower than converting the TensorFlow checkpoint in a\n PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args (remaining positional arguments, *optional*):\n All remaning positional arguments will be passed to the underlying model's `__init__` method.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n `output_attentions=True`).\n\n - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.\n - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.\n - To update the parent model configuration, do not use a prefix for each configuration parameter.\n\n Behaves differently depending on whether a `config` is provided or automatically loaded.\n\n Example:\n\n ```python\n >>> from transformers import VELDT5Model\n\n >>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized\n >>> model = VELDT5Model.from_encoder_decoder_pretrained(\n ... \"google/vit-base-patch16-224-in21k\", \"bert-base-uncased\"\n ... )\n >>> # saving model after fine-tuning\n >>> model.save_pretrained(\"./vit-bert\")\n >>> # load fine-tuned model\n >>> model = VELDT5Model.from_pretrained(\"./vit-bert\")\n ```\"\"\"\n\n kwargs_encoder = {\n argument[len(\"encoder_\") :]: value for argument, value in kwargs.items() if argument.startswith(\"encoder_\")\n }\n\n kwargs_decoder = {\n argument[len(\"decoder_\") :]: value for argument, value in kwargs.items() if argument.startswith(\"decoder_\")\n }\n\n # remove encoder, decoder kwargs from kwargs\n for key in kwargs_encoder.keys():\n del kwargs[\"encoder_\" + key]\n for key in kwargs_decoder.keys():\n del kwargs[\"decoder_\" + key]\n\n # Load and initialize the encoder and decoder\n # The distinction between encoder and decoder at the model level is made\n # by the value of the flag `is_decoder` that we need to set correctly.\n encoder = kwargs_encoder.pop(\"model\", None)\n if encoder is None:\n if encoder_pretrained_model_name_or_path is None:\n raise ValueError(\n \"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has \"\n \"to be defined.\"\n )\n\n if \"config\" not in kwargs_encoder:\n encoder_config, kwargs_encoder = ViTConfig.from_pretrained(\n encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True\n )\n\n if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:\n logger.info(\n f\"Initializing {encoder_pretrained_model_name_or_path} as a encoder model \"\n \"from a decoder model. 
Cross-attention and casual mask are disabled.\"\n )\n encoder_config.is_decoder = False\n encoder_config.add_cross_attention = False\n\n kwargs_encoder[\"config\"] = encoder_config\n\n encoder = ViTModel.from_pretrained(encoder_pretrained_model_name_or_path, add_pooling_layer=False, *model_args, **kwargs_encoder)\n\n decoder = kwargs_decoder.pop(\"model\", None)\n if decoder is None:\n if decoder_pretrained_model_name_or_path is None:\n raise ValueError(\n \"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has \"\n \"to be defined.\"\n )\n\n if \"config\" not in kwargs_decoder:\n decoder_config, kwargs_decoder = T5Config.from_pretrained(\n decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True\n )\n\n if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:\n logger.info(\n f\"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention\"\n f\" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if\"\n f\" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.\"\n )\n decoder_config.is_decoder = True\n decoder_config.add_cross_attention = True\n\n kwargs_decoder[\"config\"] = decoder_config\n\n if kwargs_decoder[\"config\"].is_decoder is False or kwargs_decoder[\"config\"].add_cross_attention is False:\n logger.warning(\n f\"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. \"\n f\"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, \"\n \"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` \"\n \"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a \"\n \"`decoder_config` to `.from_encoder_decoder_pretrained(...)`\"\n )\n\n decoder = T5DualDecoderDoubleHeadsModel.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)\n\n # instantiate config with corresponding kwargs\n config = VELDT5Config.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)\n\n # make sure input & output embeddings is not tied\n config.tie_word_embeddings = False\n return cls(encoder=encoder, decoder=decoder, config=config)\n\n @add_start_docstrings_to_model_forward(VISION_ENCODER_DECODER_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC_VELDT5)\n def forward(\n self,\n pixel_values=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n decoder_inputs_embeds=None,\n labels=None,\n return_contrastive_loss=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n logit_temperature=1.0,\n label_smoothing=0.0,\n **kwargs,\n ):\n r\"\"\"\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import TrOCRProcessor, VisionEncoderDecoderModel\n >>> import requests\n >>> from PIL import Image\n >>> import torch\n\n >>> processor = TrOCRProcessor.from_pretrained(\"microsoft/trocr-base-handwritten\")\n >>> model = VisionEncoderDecoderModel.from_pretrained(\"microsoft/trocr-base-handwritten\")\n\n >>> # load image from the IAM dataset\n >>> url = \"https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw).convert(\"RGB\")\n\n >>> # training\n >>> model.config.decoder_start_token_id = processor.tokenizer.cls_token_id\n >>> model.config.pad_token_id = 
processor.tokenizer.pad_token_id\n >>> model.config.vocab_size = model.config.decoder.vocab_size\n\n >>> pixel_values = processor(image, return_tensors=\"pt\").pixel_values\n >>> text = \"hello world\"\n >>> labels = processor.tokenizer(text, return_tensors=\"pt\").input_ids\n >>> outputs = model(pixel_values=pixel_values, labels=labels)\n >>> loss = outputs.loss\n\n >>> # inference (generation)\n >>> generated_ids = model.generate(pixel_values)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith(\"decoder_\")}\n\n kwargs_decoder = {\n argument[len(\"decoder_\") :]: value for argument, value in kwargs.items() if argument.startswith(\"decoder_\")\n }\n\n if encoder_outputs is None and pixel_values is not None:\n # if pixel_values is None:\n # raise ValueError(\"You have to specify pixel_values\")\n\n encoder_outputs = self.encoder(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs_encoder,\n )\n elif isinstance(encoder_outputs, tuple):\n encoder_outputs = BaseModelOutput(*encoder_outputs)\n\n encoder_hidden_states = None if encoder_outputs is None else encoder_outputs[0]\n pooler_output_local = None if encoder_outputs is None else self.local_pooling(encoder_hidden_states)\n pooler_output_global = None if encoder_outputs is None else self.global_pooling(pooler_output_local).squeeze(1)\n\n # optionally project encoder_hidden_states\n if (\n self.encoder.config.hidden_size != self.decoder.config.hidden_size\n and self.decoder.config.cross_attention_hidden_size is None\n and pooler_output_local is not None\n ):\n pooler_output_local = self.enc_to_dec_proj(pooler_output_local)\n\n\n # else:\n encoder_attention_mask = None\n\n if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):\n decoder_input_ids = self.decoder.prepare_decoder_input_ids_from_labels(labels)\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=pooler_output_local,\n encoder_attention_mask=encoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n use_cache=use_cache,\n past_key_values=past_key_values,\n return_dict=return_dict,\n **kwargs_decoder,\n )\n\n # Compute loss independent from decoder (as some shift the logits inside them)\n loss = None\n if labels is not None:\n logits = decoder_outputs.logits if return_dict else decoder_outputs[0]\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))\n \n c_loss = None\n if return_contrastive_loss is not None and encoder_outputs is not None:\n decoder_logits = decoder_outputs.ss_logits if return_dict else decoder_outputs[0]\n encoder_logits = pooler_output_global\n loss_fct = CrossEntropyLoss(label_smoothing=label_smoothing)\n\n if (\n self.encoder.config.hidden_size != self.decoder.config.hidden_size\n and self.decoder.config.cross_attention_hidden_size is None\n ):\n encoder_logits = self.enc_to_dec_proj(encoder_logits)\n\n\n encoder_logits = nn.functional.normalize(encoder_logits)\n decoder_logits = nn.functional.normalize(decoder_logits)\n\n batch_size = encoder_logits.size(0)\n 
scores = torch.mm(decoder_logits, encoder_logits.t())\n target = torch.arange(batch_size).to(decoder_logits.device)\n\n c_loss = loss_fct(scores/logit_temperature, target) + loss_fct(scores.t()/logit_temperature, target)\n\n\n if decoder_outputs.self_decoder_hidden_states is not None and decoder_outputs.cross_decoder_hidden_states is not None:\n decoder_hidden_states = decoder_outputs.self_decoder_hidden_states + decoder_outputs.cross_decoder_hidden_states\n else:\n decoder_hidden_states = None\n\n if decoder_outputs.self_decoder_attentions is not None and decoder_outputs.cross_decoder_attentions is not None:\n decoder_attentions = decoder_outputs.self_decoder_attentions + decoder_outputs.cross_decoder_attentions\n else:\n decoder_attentions = None\n\n if not return_dict:\n outputs = (\n decoder_outputs.logits,\n pooler_output_global,\n pooler_output_local,\n decoder_outputs.ss_logits,\n decoder_outputs.past_key_values,\n decoder_hidden_states,\n decoder_attentions,\n decoder_outputs.cross_attentions,\n None if encoder_outputs is None else encoder_outputs.last_hidden_state,\n None if encoder_outputs is None else encoder_outputs.hidden_states,\n None if encoder_outputs is None else encoder_outputs.attentions,\n )\n if c_loss is not None:\n outputs = (c_loss,) + outputs\n if loss is not None:\n return (loss,) + outputs\n else:\n return outputs\n\n return VELDDoubleHeadsOutput(\n loss=loss,\n c_loss=c_loss,\n logits=decoder_outputs.logits,\n e_logits_g=pooler_output_global,\n e_logits_l=pooler_output_local,\n d_logits=decoder_outputs.ss_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_hidden_states,\n decoder_attentions=decoder_attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=None if encoder_outputs is None else encoder_outputs.last_hidden_state,\n encoder_hidden_states=None if encoder_outputs is None else encoder_outputs.hidden_states,\n encoder_attentions=None if encoder_outputs is None else encoder_outputs.attentions,\n )\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return self.decoder.prepare_decoder_input_ids_from_labels(labels)\n\n def prepare_inputs_for_generation(\n self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs\n ):\n decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)\n decoder_attention_mask = decoder_inputs[\"attention_mask\"] if \"attention_mask\" in decoder_inputs else None\n input_dict = {\n \"attention_mask\": attention_mask,\n \"decoder_attention_mask\": decoder_attention_mask,\n \"decoder_input_ids\": decoder_inputs[\"input_ids\"],\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": decoder_inputs[\"past_key_values\"],\n \"use_cache\": use_cache,\n }\n return input_dict\n\n def resize_token_embeddings(self, *args, **kwargs):\n raise NotImplementedError(\n \"Resizing the embedding layers via the VisionEncoderDecoderModel directly is not supported.Please use the\"\n \" respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))\"\n )\n\n def _reorder_cache(self, past, beam_idx):\n # apply decoder cache reordering here\n return self.decoder._reorder_cache(past, beam_idx)"
}
] | import argparse
import json
import logging
import math
import os
import random
import numpy as np
import torch
import transformers
import datasets
import evaluate
from curses import raw
from datetime import timedelta
from itertools import chain
from torch import nn
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from torch.nn import CrossEntropyLoss
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed, InitProcessGroupKwargs, DistributedDataParallelKwargs
from torch.optim import AdamW
from transformers import (
AutoTokenizer,
ViTFeatureExtractor,
SchedulerType,
get_scheduler,
default_data_collator,
)
from datasets import load_dataset
from data_utils import DatasetForVLAlign
from modeling_veldt5 import VELDT5Model
from mecab import MeCab
from PIL import Image | 8,022 | help="Total number of validation steps to perform.",
)
# parser.add_argument(
# "--max_train_steps_per_epoch",
# type=int,
# default=None,
# help="The number of training steps to perform on a epoch. (for debugging)",
# )
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
# parser.add_argument(
# "--warmup_portion", type=float, default=0, help="Portion of total training steps for the warmup in the lr scheduler."
# )
# parser.add_argument(
# "--checkpointing_steps",
# type=str,
# default=None,
# help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
# )
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
# logging
# parser.add_argument(
# "--logging_steps", type=int, default=0, help="Number of steps for logging (stdout)."
# )
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
parser.add_argument(
"--from_veld_model",
type=str,
default=None,
help=(
"Path to model that you want to test"
),
)
parser.add_argument(
"--save_caption_result",
action="store_true",
help="save caption results in <model_path>/figures/<img_num>.png and <model_path>/figures/captions.json",
)
args = parser.parse_args()
print("[BG] args.validation_path:", args.validation_path)
# assert(False)
return args
def main():
args = parse_args()
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
# accelerator_log_kwargs["logging_dir"] = args.output_dir
kwargs_handlers = [
InitProcessGroupKwargs(timeout=timedelta(days=10)),
DistributedDataParallelKwargs(find_unused_parameters=True)
]
# accelerator_log_kwargs["project_dir"] = accelerator_log_kwargs["logging_dir"]
# del accelerator_log_kwargs["logging_dir"]
accelerator = Accelerator(
# gradient_accumulation_steps=args.gradient_accumulation_steps,
kwargs_handlers=kwargs_handlers , **accelerator_log_kwargs)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
random.seed(args.seed)
model = None
# Load model and tokenizer
logger.info("***** Running from a pretrained VELD model *****")
| #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022 san kim
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = get_logger(__name__)
# epochs=1
# learning_rate=0.001
# scheduler_type=linear
# accelerate launch training_veldt5_accelerate.py \
# --vision_model 'google/vit-base-patch16-384' \
# --language_model 'KETI-AIR/ke-t5-base' \
# --gradient_accumulation_steps 32 \
# --per_device_train_batch_size 16 \
# --per_device_eval_batch_size 16 \
# --warmup_portion 0.02 \
# --logging_steps 20 \
# --checkpointing_steps 10000 \
# --num_train_epochs $epochs \
# --lr_scheduler_type $scheduler_type \
# --with_tracking \
# --output_dir veld_e${epochs}_${scheduler_type}
# accelerate launch training_veldt5_accelerate.py \
# --max_train_steps_per_epoch 100 \
# --max_validation_steps 20 \
# --logging_steps 5 \
# --with_tracking \
# --output_dir test
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a summarization task")
# data
parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task")
parser.add_argument(
"--dataset_name_lm",
type=str,
default="sent_dataset.py",
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name_lm",
type=str,
default="base",
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--hf_cache_dir",
type=str,
default="../huggingface_datasets",
help="The path to cache directory for huggingface datasets.",
)
parser.add_argument(
"--validation_split_percentage",
default=1,
help="The percentage of the train set used as validation set in case there's no validation split",
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=256,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--block_size",
type=int,
default=None,
help=(
"Optional input sequence length after tokenization. The training dataset will be truncated in block of"
" this size for training. Default to the model max input length for single sentence inputs (take into"
" account special tokens)."
),
)
# parser.add_argument("--train_path",
# default="../../downloaded_data/train-filtered.json", type=str)
parser.add_argument("--validation_path",
default="../../downloaded_data/validation-filtered.json", type=str)
# parser.add_argument("--image_root_dir",
# default="../../downloaded_data", type=str)
parser.add_argument(
"--dataset_name",
type=str,
default="image_text_pair_datasets.py",
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default="base",
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--hf_data_dir",
type=str,
default="../../downloaded_data",
help="The path to data directory for huggingface datasets.",
)
# model
parser.add_argument("--vision_model",
default="google/vit-base-patch16-384", type=str)
parser.add_argument("--language_model",
default="KETI-AIR/ke-t5-base", type=str)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
# training
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
# parser.add_argument(
# "--gradient_accumulation_steps",
# type=int,
# default=1,
# help="Number of updates steps to accumulate before performing a backward/update pass.",
# )
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=16,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=8e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--contrastive_weight", default=1.0,
type=float, help="The weighting value for contrastive loss")
parser.add_argument("--captioning_weight", default=2.0,
type=float, help="The weighting value for captioning loss")
parser.add_argument("--lm_weight", default=1.0,
type=float, help="The weighting value for lm loss")
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--logit_temperature", default=1.0,
type=float, help="temperature for logits")
parser.add_argument("--label_smoothing", default=0.0,
type=float, help="label smoothing for cross entropy")
# parser.add_argument("--num_train_epochs", type=int, default=1, help="Total number of training epochs to perform.")
# parser.add_argument(
# "--max_train_steps",
# type=int,
# default=None,
# help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
# )
parser.add_argument(
"--max_validation_steps",
type=int,
default=None,
help="Total number of validation steps to perform.",
)
# parser.add_argument(
# "--max_train_steps_per_epoch",
# type=int,
# default=None,
# help="The number of training steps to perform on a epoch. (for debugging)",
# )
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
# parser.add_argument(
# "--warmup_portion", type=float, default=0, help="Portion of total training steps for the warmup in the lr scheduler."
# )
# parser.add_argument(
# "--checkpointing_steps",
# type=str,
# default=None,
# help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
# )
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
# logging
# parser.add_argument(
# "--logging_steps", type=int, default=0, help="Number of steps for logging (stdout)."
# )
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
parser.add_argument(
"--from_veld_model",
type=str,
default=None,
help=(
"Path to model that you want to test"
),
)
parser.add_argument(
"--save_caption_result",
action="store_true",
help="save caption results in <model_path>/figures/<img_num>.png and <model_path>/figures/captions.json",
)
args = parser.parse_args()
print("[BG] args.validation_path:", args.validation_path)
# assert(False)
return args
def main():
args = parse_args()
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
# accelerator_log_kwargs["logging_dir"] = args.output_dir
kwargs_handlers = [
InitProcessGroupKwargs(timeout=timedelta(days=10)),
DistributedDataParallelKwargs(find_unused_parameters=True)
]
# accelerator_log_kwargs["project_dir"] = accelerator_log_kwargs["logging_dir"]
# del accelerator_log_kwargs["logging_dir"]
accelerator = Accelerator(
# gradient_accumulation_steps=args.gradient_accumulation_steps,
kwargs_handlers=kwargs_handlers , **accelerator_log_kwargs)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
random.seed(args.seed)
model = None
# Load model and tokenizer
logger.info("***** Running from a pretrained VELD model *****") | model = VELDT5Model.from_pretrained(args.from_veld_model) | 1 | 2023-12-19 01:37:23+00:00 | 12k |
sidharthrajaram/StyleTTS2 | src/styletts2/models.py | [
{
"identifier": "ASRCNN",
"path": "src/styletts2/Utils/ASR/models.py",
"snippet": "class ASRCNN(nn.Module):\n def __init__(self,\n input_dim=80,\n hidden_dim=256,\n n_token=35,\n n_layers=6,\n token_embedding_dim=256,\n\n ):\n super().__init__()\n self.n_token = n_token\n self.n_down = 1\n self.to_mfcc = MFCC()\n self.init_cnn = ConvNorm(input_dim//2, hidden_dim, kernel_size=7, padding=3, stride=2)\n self.cnns = nn.Sequential(\n *[nn.Sequential(\n ConvBlock(hidden_dim),\n nn.GroupNorm(num_groups=1, num_channels=hidden_dim)\n ) for n in range(n_layers)])\n self.projection = ConvNorm(hidden_dim, hidden_dim // 2)\n self.ctc_linear = nn.Sequential(\n LinearNorm(hidden_dim//2, hidden_dim),\n nn.ReLU(),\n LinearNorm(hidden_dim, n_token))\n self.asr_s2s = ASRS2S(\n embedding_dim=token_embedding_dim,\n hidden_dim=hidden_dim//2,\n n_token=n_token)\n\n def forward(self, x, src_key_padding_mask=None, text_input=None):\n x = self.to_mfcc(x)\n x = self.init_cnn(x)\n x = self.cnns(x)\n x = self.projection(x)\n x = x.transpose(1, 2)\n ctc_logit = self.ctc_linear(x)\n if text_input is not None:\n _, s2s_logit, s2s_attn = self.asr_s2s(x, src_key_padding_mask, text_input)\n return ctc_logit, s2s_logit, s2s_attn\n else:\n return ctc_logit\n\n def get_feature(self, x):\n x = self.to_mfcc(x.squeeze(1))\n x = self.init_cnn(x)\n x = self.cnns(x)\n x = self.projection(x)\n return x\n\n def length_to_mask(self, lengths):\n mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)\n mask = torch.gt(mask+1, lengths.unsqueeze(1)).to(lengths.device)\n return mask\n\n def get_future_mask(self, out_length, unmask_future_steps=0):\n \"\"\"\n Args:\n out_length (int): returned mask shape is (out_length, out_length).\n unmask_futre_steps (int): unmasking future step size.\n Return:\n mask (torch.BoolTensor): mask future timesteps mask[i, j] = True if i > j + unmask_future_steps else False\n \"\"\"\n index_tensor = torch.arange(out_length).unsqueeze(0).expand(out_length, -1)\n mask = torch.gt(index_tensor, index_tensor.T + unmask_future_steps)\n return mask"
},
{
"identifier": "JDCNet",
"path": "src/styletts2/Utils/JDC/model.py",
"snippet": "class JDCNet(nn.Module):\n \"\"\"\n Joint Detection and Classification Network model for singing voice melody.\n \"\"\"\n def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):\n super().__init__()\n self.num_class = num_class\n\n # input = (b, 1, 31, 513), b = batch size\n self.conv_block = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)\n nn.BatchNorm2d(num_features=64),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)\n )\n\n # res blocks\n self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)\n self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)\n self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)\n\n # pool block\n self.pool_block = nn.Sequential(\n nn.BatchNorm2d(num_features=256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)\n nn.Dropout(p=0.2),\n )\n\n # maxpool layers (for auxiliary network inputs)\n # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)\n self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))\n # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)\n self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))\n # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)\n self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))\n\n # in = (b, 640, 31, 2), out = (b, 256, 31, 2)\n self.detector_conv = nn.Sequential(\n nn.Conv2d(640, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Dropout(p=0.2),\n )\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_classifier = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_detector = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b * 31, 512)\n self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)\n\n # input: (b * 31, 512)\n self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier\n\n # initialize weights\n self.apply(self.init_weights)\n\n def get_feature_GAN(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return poolblock_out.transpose(-1, -2)\n \n def get_feature(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return self.pool_block[2](poolblock_out)\n \n def forward(self, x):\n \"\"\"\n Returns:\n classification_prediction, detection_prediction\n sizes: (b, 31, 722), (b, 31, 2)\n \"\"\"\n ###############################\n # forward pass for classifier #\n ###############################\n seq_len = x.shape[-1]\n x = x.float().transpose(-1, -2)\n \n convblock_out = 
self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n \n \n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n GAN_feature = poolblock_out.transpose(-1, -2)\n poolblock_out = self.pool_block[2](poolblock_out)\n \n # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)\n classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))\n classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states\n\n classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512)\n classifier_out = self.classifier(classifier_out)\n classifier_out = classifier_out.view((-1, seq_len, self.num_class)) # (b, 31, num_class)\n \n # sizes: (b, 31, 722), (b, 31, 2)\n # classifier output consists of predicted pitch classes per frame\n # detector output consists of: (isvoice, notvoice) estimates per frame\n return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out\n\n @staticmethod\n def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.kaiming_uniform_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):\n for p in m.parameters():\n if p.data is None:\n continue\n\n if len(p.shape) >= 2:\n nn.init.orthogonal_(p.data)\n else:\n nn.init.normal_(p.data)"
},
{
"identifier": "KDiffusion",
"path": "src/styletts2/Modules/diffusion/sampler.py",
"snippet": "class KDiffusion(Diffusion):\n \"\"\"Elucidated Diffusion (Karras et al. 2022): https://arxiv.org/abs/2206.00364\"\"\"\n\n alias = \"k\"\n\n def __init__(\n self,\n net: nn.Module,\n *,\n sigma_distribution: Distribution,\n sigma_data: float, # data distribution standard deviation\n dynamic_threshold: float = 0.0,\n ):\n super().__init__()\n self.net = net\n self.sigma_data = sigma_data\n self.sigma_distribution = sigma_distribution\n self.dynamic_threshold = dynamic_threshold\n\n def get_scale_weights(self, sigmas: Tensor) -> Tuple[Tensor, ...]:\n sigma_data = self.sigma_data\n c_noise = torch.log(sigmas) * 0.25\n sigmas = rearrange(sigmas, \"b -> b 1 1\")\n c_skip = (sigma_data ** 2) / (sigmas ** 2 + sigma_data ** 2)\n c_out = sigmas * sigma_data * (sigma_data ** 2 + sigmas ** 2) ** -0.5\n c_in = (sigmas ** 2 + sigma_data ** 2) ** -0.5\n return c_skip, c_out, c_in, c_noise\n\n def denoise_fn(\n self,\n x_noisy: Tensor,\n sigmas: Optional[Tensor] = None,\n sigma: Optional[float] = None,\n **kwargs,\n ) -> Tensor:\n batch_size, device = x_noisy.shape[0], x_noisy.device\n sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device)\n\n # Predict network output and add skip connection\n c_skip, c_out, c_in, c_noise = self.get_scale_weights(sigmas)\n x_pred = self.net(c_in * x_noisy, c_noise, **kwargs)\n x_denoised = c_skip * x_noisy + c_out * x_pred\n\n return x_denoised\n\n def loss_weight(self, sigmas: Tensor) -> Tensor:\n # Computes weight depending on data distribution\n return (sigmas ** 2 + self.sigma_data ** 2) * (sigmas * self.sigma_data) ** -2\n\n def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor:\n batch_size, device = x.shape[0], x.device\n from einops import rearrange, reduce\n\n # Sample amount of noise to add for each batch element\n sigmas = self.sigma_distribution(num_samples=batch_size, device=device)\n sigmas_padded = rearrange(sigmas, \"b -> b 1 1\")\n\n # Add noise to input\n noise = default(noise, lambda: torch.randn_like(x))\n x_noisy = x + sigmas_padded * noise\n \n # Compute denoised values\n x_denoised = self.denoise_fn(x_noisy, sigmas=sigmas, **kwargs)\n\n # Compute weighted loss\n losses = F.mse_loss(x_denoised, x, reduction=\"none\")\n losses = reduce(losses, \"b ... -> b\", \"mean\")\n losses = losses * self.loss_weight(sigmas)\n loss = losses.mean()\n return loss"
},
{
"identifier": "LogNormalDistribution",
"path": "src/styletts2/Modules/diffusion/sampler.py",
"snippet": "class LogNormalDistribution(Distribution):\n def __init__(self, mean: float, std: float):\n self.mean = mean\n self.std = std\n\n def __call__(\n self, num_samples: int, device: torch.device = torch.device(\"cpu\")\n ) -> Tensor:\n normal = self.mean + self.std * torch.randn((num_samples,), device=device)\n return normal.exp()"
},
{
"identifier": "Transformer1d",
"path": "src/styletts2/Modules/diffusion/modules.py",
"snippet": "class Transformer1d(nn.Module):\n def __init__(\n self,\n num_layers: int,\n channels: int,\n num_heads: int,\n head_features: int,\n multiplier: int,\n use_context_time: bool = True,\n use_rel_pos: bool = False,\n context_features_multiplier: int = 1,\n rel_pos_num_buckets: Optional[int] = None,\n rel_pos_max_distance: Optional[int] = None,\n context_features: Optional[int] = None,\n context_embedding_features: Optional[int] = None,\n embedding_max_length: int = 512,\n ):\n super().__init__()\n\n self.blocks = nn.ModuleList(\n [\n TransformerBlock(\n features=channels + context_embedding_features,\n head_features=head_features,\n num_heads=num_heads,\n multiplier=multiplier,\n use_rel_pos=use_rel_pos,\n rel_pos_num_buckets=rel_pos_num_buckets,\n rel_pos_max_distance=rel_pos_max_distance,\n )\n for i in range(num_layers)\n ]\n )\n\n self.to_out = nn.Sequential(\n Rearrange(\"b t c -> b c t\"),\n nn.Conv1d(\n in_channels=channels + context_embedding_features,\n out_channels=channels,\n kernel_size=1,\n ),\n )\n \n use_context_features = exists(context_features)\n self.use_context_features = use_context_features\n self.use_context_time = use_context_time\n\n if use_context_time or use_context_features:\n context_mapping_features = channels + context_embedding_features\n\n self.to_mapping = nn.Sequential(\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n )\n \n if use_context_time:\n assert exists(context_mapping_features)\n self.to_time = nn.Sequential(\n TimePositionalEmbedding(\n dim=channels, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n\n if use_context_features:\n assert exists(context_features) and exists(context_mapping_features)\n self.to_features = nn.Sequential(\n nn.Linear(\n in_features=context_features, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n \n self.fixed_embedding = FixedEmbedding(\n max_length=embedding_max_length, features=context_embedding_features\n )\n \n\n def get_mapping(\n self, time: Optional[Tensor] = None, features: Optional[Tensor] = None\n ) -> Optional[Tensor]:\n \"\"\"Combines context time features and features into mapping\"\"\"\n items, mapping = [], None\n # Compute time features\n if self.use_context_time:\n assert_message = \"use_context_time=True but no time features provided\"\n assert exists(time), assert_message\n items += [self.to_time(time)]\n # Compute features\n if self.use_context_features:\n assert_message = \"context_features exists but no features provided\"\n assert exists(features), assert_message\n items += [self.to_features(features)]\n\n # Compute joint mapping\n if self.use_context_time or self.use_context_features:\n mapping = reduce(torch.stack(items), \"n b m -> b m\", \"sum\")\n mapping = self.to_mapping(mapping)\n\n return mapping\n \n def run(self, x, time, embedding, features):\n \n mapping = self.get_mapping(time, features)\n x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1)\n mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1)\n \n for block in self.blocks:\n x = x + mapping\n x = block(x)\n \n x = x.mean(axis=1).unsqueeze(1)\n x = self.to_out(x)\n x = x.transpose(-1, -2)\n \n return x\n \n def forward(self, x: Tensor, \n time: Tensor, \n embedding_mask_proba: float = 0.0,\n embedding: Optional[Tensor] = None, \n features: Optional[Tensor] = None,\n embedding_scale: float = 1.0) -> Tensor:\n \n b, device = embedding.shape[0], 
embedding.device\n fixed_embedding = self.fixed_embedding(embedding)\n if embedding_mask_proba > 0.0:\n # Randomly mask embedding\n batch_mask = rand_bool(\n shape=(b, 1, 1), proba=embedding_mask_proba, device=device\n )\n embedding = torch.where(batch_mask, fixed_embedding, embedding)\n\n if embedding_scale != 1.0:\n # Compute both normal and fixed embedding outputs\n out = self.run(x, time, embedding=embedding, features=features)\n out_masked = self.run(x, time, embedding=fixed_embedding, features=features)\n # Scale conditional output using classifier-free guidance\n return out_masked + (out - out_masked) * embedding_scale\n else:\n return self.run(x, time, embedding=embedding, features=features)\n \n return x"
},
{
"identifier": "StyleTransformer1d",
"path": "src/styletts2/Modules/diffusion/modules.py",
"snippet": "class StyleTransformer1d(nn.Module):\n def __init__(\n self,\n num_layers: int,\n channels: int,\n num_heads: int,\n head_features: int,\n multiplier: int,\n use_context_time: bool = True,\n use_rel_pos: bool = False,\n context_features_multiplier: int = 1,\n rel_pos_num_buckets: Optional[int] = None,\n rel_pos_max_distance: Optional[int] = None,\n context_features: Optional[int] = None,\n context_embedding_features: Optional[int] = None,\n embedding_max_length: int = 512,\n ):\n super().__init__()\n\n self.blocks = nn.ModuleList(\n [\n StyleTransformerBlock(\n features=channels + context_embedding_features,\n head_features=head_features,\n num_heads=num_heads,\n multiplier=multiplier,\n style_dim=context_features,\n use_rel_pos=use_rel_pos,\n rel_pos_num_buckets=rel_pos_num_buckets,\n rel_pos_max_distance=rel_pos_max_distance,\n )\n for i in range(num_layers)\n ]\n )\n\n self.to_out = nn.Sequential(\n Rearrange(\"b t c -> b c t\"),\n nn.Conv1d(\n in_channels=channels + context_embedding_features,\n out_channels=channels,\n kernel_size=1,\n ),\n )\n \n use_context_features = exists(context_features)\n self.use_context_features = use_context_features\n self.use_context_time = use_context_time\n\n if use_context_time or use_context_features:\n context_mapping_features = channels + context_embedding_features\n\n self.to_mapping = nn.Sequential(\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n )\n \n if use_context_time:\n assert exists(context_mapping_features)\n self.to_time = nn.Sequential(\n TimePositionalEmbedding(\n dim=channels, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n\n if use_context_features:\n assert exists(context_features) and exists(context_mapping_features)\n self.to_features = nn.Sequential(\n nn.Linear(\n in_features=context_features, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n \n self.fixed_embedding = FixedEmbedding(\n max_length=embedding_max_length, features=context_embedding_features\n )\n \n\n def get_mapping(\n self, time: Optional[Tensor] = None, features: Optional[Tensor] = None\n ) -> Optional[Tensor]:\n \"\"\"Combines context time features and features into mapping\"\"\"\n items, mapping = [], None\n # Compute time features\n if self.use_context_time:\n assert_message = \"use_context_time=True but no time features provided\"\n assert exists(time), assert_message\n items += [self.to_time(time)]\n # Compute features\n if self.use_context_features:\n assert_message = \"context_features exists but no features provided\"\n assert exists(features), assert_message\n items += [self.to_features(features)]\n\n # Compute joint mapping\n if self.use_context_time or self.use_context_features:\n mapping = reduce(torch.stack(items), \"n b m -> b m\", \"sum\")\n mapping = self.to_mapping(mapping)\n\n return mapping\n \n def run(self, x, time, embedding, features):\n \n mapping = self.get_mapping(time, features)\n x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1)\n mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1)\n \n for block in self.blocks:\n x = x + mapping\n x = block(x, features)\n \n x = x.mean(axis=1).unsqueeze(1)\n x = self.to_out(x)\n x = x.transpose(-1, -2)\n \n return x\n \n def forward(self, x: Tensor, \n time: Tensor, \n embedding_mask_proba: float = 0.0,\n embedding: Optional[Tensor] = None, \n features: Optional[Tensor] = None,\n embedding_scale: float = 1.0) -> Tensor:\n 
\n b, device = embedding.shape[0], embedding.device\n fixed_embedding = self.fixed_embedding(embedding)\n if embedding_mask_proba > 0.0:\n # Randomly mask embedding\n batch_mask = rand_bool(\n shape=(b, 1, 1), proba=embedding_mask_proba, device=device\n )\n embedding = torch.where(batch_mask, fixed_embedding, embedding)\n\n if embedding_scale != 1.0:\n # Compute both normal and fixed embedding outputs\n out = self.run(x, time, embedding=embedding, features=features)\n out_masked = self.run(x, time, embedding=fixed_embedding, features=features)\n # Scale conditional output using classifier-free guidance\n return out_masked + (out - out_masked) * embedding_scale\n else:\n return self.run(x, time, embedding=embedding, features=features)\n \n return x"
},
{
"identifier": "AudioDiffusionConditional",
"path": "src/styletts2/Modules/diffusion/diffusion.py",
"snippet": "class AudioDiffusionConditional(Model1d):\n def __init__(\n self,\n embedding_features: int,\n embedding_max_length: int,\n embedding_mask_proba: float = 0.1,\n **kwargs,\n ):\n self.embedding_mask_proba = embedding_mask_proba\n default_kwargs = dict(\n **get_default_model_kwargs(),\n unet_type=\"cfg\",\n context_embedding_features=embedding_features,\n context_embedding_max_length=embedding_max_length,\n )\n super().__init__(**{**default_kwargs, **kwargs})\n\n def forward(self, *args, **kwargs):\n default_kwargs = dict(embedding_mask_proba=self.embedding_mask_proba)\n return super().forward(*args, **{**default_kwargs, **kwargs})\n\n def sample(self, *args, **kwargs):\n default_kwargs = dict(\n **get_default_sampling_kwargs(),\n embedding_scale=5.0,\n )\n return super().sample(*args, **{**default_kwargs, **kwargs})"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "src/styletts2/Modules/discriminators.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiPeriodDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n DiscriminatorP(2),\n DiscriminatorP(3),\n DiscriminatorP(5),\n DiscriminatorP(7),\n DiscriminatorP(11),\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "MultiResSpecDiscriminator",
"path": "src/styletts2/Modules/discriminators.py",
"snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "WavLMDiscriminator",
"path": "src/styletts2/Modules/discriminators.py",
"snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(self, slm_hidden=768, \n slm_layers=13, \n initial_channel=64, \n use_spectral_norm=False):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0))\n \n self.convs = nn.ModuleList([\n norm_f(nn.Conv1d(initial_channel, initial_channel * 2, kernel_size=5, padding=2)),\n norm_f(nn.Conv1d(initial_channel * 2, initial_channel * 4, kernel_size=5, padding=2)),\n norm_f(nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)),\n ])\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n \n def forward(self, x):\n x = self.pre(x)\n \n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x"
}
] | import os
import os.path as osp
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from .Utils.ASR.models import ASRCNN
from .Utils.JDC.model import JDCNet
from .Modules.diffusion.sampler import KDiffusion, LogNormalDistribution
from .Modules.diffusion.modules import Transformer1d, StyleTransformer1d
from .Modules.diffusion.diffusion import AudioDiffusionConditional
from .Modules.discriminators import MultiPeriodDiscriminator, MultiResSpecDiscriminator, WavLMDiscriminator
from munch import Munch
from .Modules.istftnet import Decoder
from .Modules.hifigan import Decoder | 10,224 |
def forward(self, texts, style, text_lengths, alignment, m):
d = self.text_encoder(texts, style, text_lengths, m)
batch_size = d.shape[0]
text_size = d.shape[1]
# predict duration
input_lengths = text_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
d, input_lengths, batch_first=True, enforce_sorted=False)
m = m.to(text_lengths.device).unsqueeze(1)
self.lstm.flatten_parameters()
x, _ = self.lstm(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x_pad = torch.zeros([x.shape[0], m.shape[-1], x.shape[-1]])
x_pad[:, :x.shape[1], :] = x
x = x_pad.to(x.device)
duration = self.duration_proj(nn.functional.dropout(x, 0.5, training=self.training))
en = (d.transpose(-1, -2) @ alignment)
return duration.squeeze(-1), en
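    # F0Ntrain: run the shared bidirectional LSTM over aligned text features, then style-conditioned AdainResBlk1d stacks to predict per-frame F0 (pitch) and N (energy) contours.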
def F0Ntrain(self, x, s):
x, _ = self.shared(x.transpose(-1, -2))
F0 = x.transpose(-1, -2)
for block in self.F0:
F0 = block(F0, s)
F0 = self.F0_proj(F0)
N = x.transpose(-1, -2)
for block in self.N:
N = block(N, s)
N = self.N_proj(N)
return F0.squeeze(1), N.squeeze(1)
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
class DurationEncoder(nn.Module):
def __init__(self, sty_dim, d_model, nlayers, dropout=0.1):
super().__init__()
self.lstms = nn.ModuleList()
for _ in range(nlayers):
self.lstms.append(nn.LSTM(d_model + sty_dim,
d_model // 2,
num_layers=1,
batch_first=True,
bidirectional=True,
dropout=dropout))
self.lstms.append(AdaLayerNorm(sty_dim, d_model))
self.dropout = dropout
self.d_model = d_model
self.sty_dim = sty_dim
def forward(self, x, style, text_lengths, m):
masks = m.to(text_lengths.device)
x = x.permute(2, 0, 1)
s = style.expand(x.shape[0], x.shape[1], -1)
x = torch.cat([x, s], axis=-1)
x.masked_fill_(masks.unsqueeze(-1).transpose(0, 1), 0.0)
x = x.transpose(0, 1)
input_lengths = text_lengths.cpu().numpy()
x = x.transpose(-1, -2)
for block in self.lstms:
if isinstance(block, AdaLayerNorm):
x = block(x.transpose(-1, -2), style).transpose(-1, -2)
x = torch.cat([x, s.permute(1, -1, 0)], axis=1)
x.masked_fill_(masks.unsqueeze(-1).transpose(-1, -2), 0.0)
else:
x = x.transpose(-1, -2)
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True, enforce_sorted=False)
block.flatten_parameters()
x, _ = block(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x = F.dropout(x, p=self.dropout, training=self.training)
x = x.transpose(-1, -2)
x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
x_pad[:, :, :x.shape[-1]] = x
x = x_pad.to(x.device)
return x.transpose(-1, -2)
def inference(self, x, style):
x = self.embedding(x.transpose(-1, -2)) * math.sqrt(self.d_model)
style = style.expand(x.shape[0], x.shape[1], -1)
x = torch.cat([x, style], axis=-1)
src = self.pos_encoder(x)
output = self.transformer_encoder(src).transpose(0, 1)
return output
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
def load_F0_models(path):
# load F0 model
| #coding:utf-8
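# StyleTTS2 model components: learned down/up-sampling blocks, style and text encoders, AdaIN-conditioned residual blocks, the prosody predictor and duration encoder, and the F0 (JDC) model loader.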
class LearnedDownSample(nn.Module):
def __init__(self, layer_type, dim_in):
super().__init__()
self.layer_type = layer_type
if self.layer_type == 'none':
self.conv = nn.Identity()
elif self.layer_type == 'timepreserve':
self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 1), stride=(2, 1), groups=dim_in, padding=(1, 0)))
elif self.layer_type == 'half':
self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 3), stride=(2, 2), groups=dim_in, padding=1))
else:
            raise RuntimeError('Got unexpected downsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
def forward(self, x):
return self.conv(x)
class LearnedUpSample(nn.Module):
def __init__(self, layer_type, dim_in):
super().__init__()
self.layer_type = layer_type
if self.layer_type == 'none':
self.conv = nn.Identity()
elif self.layer_type == 'timepreserve':
self.conv = nn.ConvTranspose2d(dim_in, dim_in, kernel_size=(3, 1), stride=(2, 1), groups=dim_in, output_padding=(1, 0), padding=(1, 0))
elif self.layer_type == 'half':
self.conv = nn.ConvTranspose2d(dim_in, dim_in, kernel_size=(3, 3), stride=(2, 2), groups=dim_in, output_padding=1, padding=1)
else:
            raise RuntimeError('Got unexpected upsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
def forward(self, x):
return self.conv(x)
class DownSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.avg_pool2d(x, (2, 1))
elif self.layer_type == 'half':
if x.shape[-1] % 2 != 0:
x = torch.cat([x, x[..., -1].unsqueeze(-1)], dim=-1)
return F.avg_pool2d(x, 2)
else:
            raise RuntimeError('Got unexpected downsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
class UpSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.interpolate(x, scale_factor=(2, 1), mode='nearest')
elif self.layer_type == 'half':
return F.interpolate(x, scale_factor=2, mode='nearest')
else:
            raise RuntimeError('Got unexpected upsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
class ResBlk(nn.Module):
def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
normalize=False, downsample='none'):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = DownSample(downsample)
self.downsample_res = LearnedDownSample(downsample, dim_in)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = spectral_norm(nn.Conv2d(dim_in, dim_in, 3, 1, 1))
self.conv2 = spectral_norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1))
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = spectral_norm(nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False))
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = self.downsample(x)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
x = self.downsample_res(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2) # unit variance
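# StyleEncoder: spectral-normalized conv stem, four ResBlk downsampling stages, global average pooling, and a linear head that maps a mel spectrogram to a fixed-size style vector.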
class StyleEncoder(nn.Module):
def __init__(self, dim_in=48, style_dim=48, max_conv_dim=384):
super().__init__()
blocks = []
blocks += [spectral_norm(nn.Conv2d(1, dim_in, 3, 1, 1))]
repeat_num = 4
for _ in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 5, 1, 0))]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [nn.LeakyReLU(0.2)]
self.shared = nn.Sequential(*blocks)
self.unshared = nn.Linear(dim_out, style_dim)
def forward(self, x):
h = self.shared(x)
h = h.view(h.size(0), -1)
s = self.unshared(h)
return s
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class Discriminator2d(nn.Module):
def __init__(self, dim_in=48, num_domains=1, max_conv_dim=384, repeat_num=4):
super().__init__()
blocks = []
blocks += [spectral_norm(nn.Conv2d(1, dim_in, 3, 1, 1))]
for lid in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 5, 1, 0))]
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [spectral_norm(nn.Conv2d(dim_out, num_domains, 1, 1, 0))]
self.main = nn.Sequential(*blocks)
def get_feature(self, x):
features = []
for l in self.main:
x = l(x)
features.append(x)
out = features[-1]
out = out.view(out.size(0), -1) # (batch, num_domains)
return out, features
def forward(self, x):
out, features = self.get_feature(x)
out = out.squeeze() # (batch)
return out, features
class ResBlk1d(nn.Module):
def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
normalize=False, downsample='none', dropout_p=0.2):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample_type = downsample
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
self.dropout_p = dropout_p
if self.downsample_type == 'none':
self.pool = nn.Identity()
else:
self.pool = weight_norm(nn.Conv1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1))
def _build_weights(self, dim_in, dim_out):
self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_in, 3, 1, 1))
self.conv2 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
if self.normalize:
self.norm1 = nn.InstanceNorm1d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm1d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))
def downsample(self, x):
if self.downsample_type == 'none':
return x
else:
if x.shape[-1] % 2 != 0:
x = torch.cat([x, x[..., -1].unsqueeze(-1)], dim=-1)
return F.avg_pool1d(x, 2)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
x = self.downsample(x)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = F.dropout(x, p=self.dropout_p, training=self.training)
x = self.conv1(x)
x = self.pool(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = F.dropout(x, p=self.dropout_p, training=self.training)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2) # unit variance
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
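# TextEncoder: symbol embedding, a stack of masked Conv1d + LayerNorm blocks, and a bidirectional LSTM producing per-symbol text features.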
class TextEncoder(nn.Module):
def __init__(self, channels, kernel_size, depth, n_symbols, actv=nn.LeakyReLU(0.2)):
super().__init__()
self.embedding = nn.Embedding(n_symbols, channels)
padding = (kernel_size - 1) // 2
self.cnn = nn.ModuleList()
for _ in range(depth):
self.cnn.append(nn.Sequential(
weight_norm(nn.Conv1d(channels, channels, kernel_size=kernel_size, padding=padding)),
LayerNorm(channels),
actv,
nn.Dropout(0.2),
))
# self.cnn = nn.Sequential(*self.cnn)
self.lstm = nn.LSTM(channels, channels//2, 1, batch_first=True, bidirectional=True)
def forward(self, x, input_lengths, m):
x = self.embedding(x) # [B, T, emb]
x = x.transpose(1, 2) # [B, emb, T]
m = m.to(input_lengths.device).unsqueeze(1)
x.masked_fill_(m, 0.0)
for c in self.cnn:
x = c(x)
x.masked_fill_(m, 0.0)
x = x.transpose(1, 2) # [B, T, chn]
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True, enforce_sorted=False)
self.lstm.flatten_parameters()
x, _ = self.lstm(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x = x.transpose(-1, -2)
x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
x_pad[:, :, :x.shape[-1]] = x
x = x_pad.to(x.device)
x.masked_fill_(m, 0.0)
return x
def inference(self, x):
x = self.embedding(x)
x = x.transpose(1, 2)
x = self.cnn(x)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
x, _ = self.lstm(x)
return x
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
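# AdaIN1d: adaptive instance normalization; a linear layer maps the style vector to per-channel scale and shift applied after InstanceNorm1d.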
class AdaIN1d(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm1d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features*2)
def forward(self, x, s):
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
return (1 + gamma) * self.norm(x) + beta
class UpSample1d(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
else:
return F.interpolate(x, scale_factor=2, mode='nearest')
class AdainResBlk1d(nn.Module):
def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2),
upsample='none', dropout_p=0.0):
super().__init__()
self.actv = actv
self.upsample_type = upsample
self.upsample = UpSample1d(upsample)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out, style_dim)
self.dropout = nn.Dropout(dropout_p)
if upsample == 'none':
self.pool = nn.Identity()
else:
self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1))
def _build_weights(self, dim_in, dim_out, style_dim):
self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1))
self.norm1 = AdaIN1d(style_dim, dim_in)
self.norm2 = AdaIN1d(style_dim, dim_out)
if self.learned_sc:
self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))
def _shortcut(self, x):
x = self.upsample(x)
if self.learned_sc:
x = self.conv1x1(x)
return x
def _residual(self, x, s):
x = self.norm1(x, s)
x = self.actv(x)
x = self.pool(x)
x = self.conv1(self.dropout(x))
x = self.norm2(x, s)
x = self.actv(x)
x = self.conv2(self.dropout(x))
return x
def forward(self, x, s):
out = self._residual(x, s)
out = (out + self._shortcut(x)) / math.sqrt(2)
return out
class AdaLayerNorm(nn.Module):
def __init__(self, style_dim, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.fc = nn.Linear(style_dim, channels*2)
def forward(self, x, s):
x = x.transpose(-1, -2)
x = x.transpose(1, -1)
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
gamma, beta = gamma.transpose(1, -1), beta.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), eps=self.eps)
x = (1 + gamma) * x + beta
return x.transpose(1, -1).transpose(-1, -2)
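# ProsodyPredictor: style-conditioned prosody model; a DurationEncoder plus BLSTM predict per-symbol durations, while F0Ntrain predicts frame-level F0 and energy.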
class ProsodyPredictor(nn.Module):
def __init__(self, style_dim, d_hid, nlayers, max_dur=50, dropout=0.1):
super().__init__()
self.text_encoder = DurationEncoder(sty_dim=style_dim,
d_model=d_hid,
nlayers=nlayers,
dropout=dropout)
self.lstm = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
self.duration_proj = LinearNorm(d_hid, max_dur)
self.shared = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
self.F0 = nn.ModuleList()
self.F0.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
self.F0.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
self.F0.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
self.N = nn.ModuleList()
self.N.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
self.N.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
self.N.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
self.F0_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)
self.N_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)
def forward(self, texts, style, text_lengths, alignment, m):
d = self.text_encoder(texts, style, text_lengths, m)
batch_size = d.shape[0]
text_size = d.shape[1]
# predict duration
input_lengths = text_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
d, input_lengths, batch_first=True, enforce_sorted=False)
m = m.to(text_lengths.device).unsqueeze(1)
self.lstm.flatten_parameters()
x, _ = self.lstm(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x_pad = torch.zeros([x.shape[0], m.shape[-1], x.shape[-1]])
x_pad[:, :x.shape[1], :] = x
x = x_pad.to(x.device)
duration = self.duration_proj(nn.functional.dropout(x, 0.5, training=self.training))
en = (d.transpose(-1, -2) @ alignment)
return duration.squeeze(-1), en
def F0Ntrain(self, x, s):
x, _ = self.shared(x.transpose(-1, -2))
F0 = x.transpose(-1, -2)
for block in self.F0:
F0 = block(F0, s)
F0 = self.F0_proj(F0)
N = x.transpose(-1, -2)
for block in self.N:
N = block(N, s)
N = self.N_proj(N)
return F0.squeeze(1), N.squeeze(1)
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
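# DurationEncoder: interleaved bidirectional LSTMs and AdaLayerNorm layers that mix text features with the style vector under the padding mask.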
class DurationEncoder(nn.Module):
def __init__(self, sty_dim, d_model, nlayers, dropout=0.1):
super().__init__()
self.lstms = nn.ModuleList()
for _ in range(nlayers):
self.lstms.append(nn.LSTM(d_model + sty_dim,
d_model // 2,
num_layers=1,
batch_first=True,
bidirectional=True,
dropout=dropout))
self.lstms.append(AdaLayerNorm(sty_dim, d_model))
self.dropout = dropout
self.d_model = d_model
self.sty_dim = sty_dim
def forward(self, x, style, text_lengths, m):
masks = m.to(text_lengths.device)
x = x.permute(2, 0, 1)
s = style.expand(x.shape[0], x.shape[1], -1)
x = torch.cat([x, s], axis=-1)
x.masked_fill_(masks.unsqueeze(-1).transpose(0, 1), 0.0)
x = x.transpose(0, 1)
input_lengths = text_lengths.cpu().numpy()
x = x.transpose(-1, -2)
for block in self.lstms:
if isinstance(block, AdaLayerNorm):
x = block(x.transpose(-1, -2), style).transpose(-1, -2)
x = torch.cat([x, s.permute(1, -1, 0)], axis=1)
x.masked_fill_(masks.unsqueeze(-1).transpose(-1, -2), 0.0)
else:
x = x.transpose(-1, -2)
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True, enforce_sorted=False)
block.flatten_parameters()
x, _ = block(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x = F.dropout(x, p=self.dropout, training=self.training)
x = x.transpose(-1, -2)
x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
x_pad[:, :, :x.shape[-1]] = x
x = x_pad.to(x.device)
return x.transpose(-1, -2)
def inference(self, x, style):
x = self.embedding(x.transpose(-1, -2)) * math.sqrt(self.d_model)
style = style.expand(x.shape[0], x.shape[1], -1)
x = torch.cat([x, style], axis=-1)
src = self.pos_encoder(x)
output = self.transformer_encoder(src).transpose(0, 1)
return output
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
def load_F0_models(path):
# load F0 model
| F0_model = JDCNet(num_class=1, seq_len=192) | 1 | 2023-12-15 10:04:21+00:00 | 12k |
alibaba/u2mot | yolox/tracker/u2mot_tracker.py | [
{
"identifier": "BaseTrack",
"path": "yolox/tracker/basetrack.py",
"snippet": "class BaseTrack(object):\n _count = 0\n\n track_id = 0\n is_activated = False\n state = TrackState.New\n\n history = OrderedDict()\n features = []\n curr_feature = None\n score = 0\n start_frame = 0\n frame_id = 0\n time_since_update = 0\n\n # multi-camera\n location = (np.inf, np.inf)\n\n @property\n def end_frame(self):\n return self.frame_id\n\n @staticmethod\n def next_id():\n BaseTrack._count += 1\n return BaseTrack._count\n\n def activate(self, *args):\n raise NotImplementedError\n\n def predict(self):\n raise NotImplementedError\n\n def update(self, *args, **kwargs):\n raise NotImplementedError\n\n def mark_lost(self):\n self.state = TrackState.Lost\n\n def mark_long_lost(self):\n self.state = TrackState.LongLost\n\n def mark_removed(self):\n self.state = TrackState.Removed\n\n @staticmethod\n def clear_count():\n BaseTrack._count = 0"
},
{
"identifier": "TrackState",
"path": "yolox/tracker/basetrack.py",
"snippet": "class TrackState(object):\n New = 0\n Tracked = 1\n Lost = 2\n LongLost = 3\n Removed = 4"
},
{
"identifier": "KalmanFilter",
"path": "yolox/tracker/kalman_filter.py",
"snippet": "class KalmanFilter(object):\n \"\"\"\n A simple Kalman filter for tracking bounding boxes in image space.\n\n The 8-dimensional state space\n\n x, y, w, h, vx, vy, vw, vh\n\n contains the bounding box center position (x, y), width w, height h,\n and their respective velocities.\n\n Object motion follows a constant velocity model. The bounding box location\n (x, y, w, h) is taken as direct observation of the state space (linear\n observation model).\n\n \"\"\"\n\n def __init__(self):\n ndim, dt = 4, 1.\n\n # Create Kalman filter model matrices.\n self._motion_mat = np.eye(2 * ndim, 2 * ndim)\n for i in range(ndim):\n self._motion_mat[i, ndim + i] = dt\n self._update_mat = np.eye(ndim, 2 * ndim)\n\n # Motion and observation uncertainty are chosen relative to the current\n # state estimate. These weights control the amount of uncertainty in\n # the model. This is a bit hacky.\n self._std_weight_position = 1. / 20\n self._std_weight_velocity = 1. / 160\n\n def initiate(self, measurement):\n \"\"\"Create track from unassociated measurement.\n\n Parameters\n ----------\n measurement : ndarray\n Bounding box coordinates (x, y, w, h) with center position (x, y),\n width w, and height h.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector (8 dimensional) and covariance matrix (8x8\n dimensional) of the new track. Unobserved velocities are initialized\n to 0 mean.\n\n \"\"\"\n mean_pos = measurement\n mean_vel = np.zeros_like(mean_pos)\n mean = np.r_[mean_pos, mean_vel]\n\n std = [\n 2 * self._std_weight_position * measurement[2],\n 2 * self._std_weight_position * measurement[3],\n 2 * self._std_weight_position * measurement[2],\n 2 * self._std_weight_position * measurement[3],\n 10 * self._std_weight_velocity * measurement[2],\n 10 * self._std_weight_velocity * measurement[3],\n 10 * self._std_weight_velocity * measurement[2],\n 10 * self._std_weight_velocity * measurement[3]]\n covariance = np.diag(np.square(std))\n return mean, covariance\n\n def predict(self, mean, covariance):\n \"\"\"Run Kalman filter prediction step.\n\n Parameters\n ----------\n mean : ndarray\n The 8 dimensional mean vector of the object state at the previous\n time step.\n covariance : ndarray\n The 8x8 dimensional covariance matrix of the object state at the\n previous time step.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector and covariance matrix of the predicted\n state. 
Unobserved velocities are initialized to 0 mean.\n\n \"\"\"\n std_pos = [\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3],\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3]]\n std_vel = [\n self._std_weight_velocity * mean[2],\n self._std_weight_velocity * mean[3],\n self._std_weight_velocity * mean[2],\n self._std_weight_velocity * mean[3]]\n motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))\n\n mean = np.dot(mean, self._motion_mat.T)\n covariance = np.linalg.multi_dot((\n self._motion_mat, covariance, self._motion_mat.T)) + motion_cov\n\n return mean, covariance\n\n def project(self, mean, covariance):\n \"\"\"Project state distribution to measurement space.\n\n Parameters\n ----------\n mean : ndarray\n The state's mean vector (8 dimensional array).\n covariance : ndarray\n The state's covariance matrix (8x8 dimensional).\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the projected mean and covariance matrix of the given state\n estimate.\n\n \"\"\"\n std = [\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3],\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3]]\n innovation_cov = np.diag(np.square(std))\n\n mean = np.dot(self._update_mat, mean)\n covariance = np.linalg.multi_dot((\n self._update_mat, covariance, self._update_mat.T))\n return mean, covariance + innovation_cov\n\n def multi_predict(self, mean, covariance):\n \"\"\"Run Kalman filter prediction step (Vectorized version).\n Parameters\n ----------\n mean : ndarray\n The Nx8 dimensional mean matrix of the object states at the previous\n time step.\n covariance : ndarray\n The Nx8x8 dimensional covariance matrics of the object states at the\n previous time step.\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector and covariance matrix of the predicted\n state. 
Unobserved velocities are initialized to 0 mean.\n \"\"\"\n std_pos = [\n self._std_weight_position * mean[:, 2],\n self._std_weight_position * mean[:, 3],\n self._std_weight_position * mean[:, 2],\n self._std_weight_position * mean[:, 3]]\n std_vel = [\n self._std_weight_velocity * mean[:, 2],\n self._std_weight_velocity * mean[:, 3],\n self._std_weight_velocity * mean[:, 2],\n self._std_weight_velocity * mean[:, 3]]\n sqr = np.square(np.r_[std_pos, std_vel]).T\n\n motion_cov = []\n for i in range(len(mean)):\n motion_cov.append(np.diag(sqr[i]))\n motion_cov = np.asarray(motion_cov)\n\n mean = np.dot(mean, self._motion_mat.T)\n left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))\n covariance = np.dot(left, self._motion_mat.T) + motion_cov\n\n return mean, covariance\n\n def update(self, mean, covariance, measurement):\n \"\"\"Run Kalman filter correction step.\n\n Parameters\n ----------\n mean : ndarray\n The predicted state's mean vector (8 dimensional).\n covariance : ndarray\n The state's covariance matrix (8x8 dimensional).\n measurement : ndarray\n The 4 dimensional measurement vector (x, y, w, h), where (x, y)\n is the center position, w the width, and h the height of the\n bounding box.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the measurement-corrected state distribution.\n\n \"\"\"\n projected_mean, projected_cov = self.project(mean, covariance)\n\n chol_factor, lower = scipy.linalg.cho_factor(\n projected_cov, lower=True, check_finite=False)\n kalman_gain = scipy.linalg.cho_solve(\n (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,\n check_finite=False).T\n innovation = measurement - projected_mean\n\n new_mean = mean + np.dot(innovation, kalman_gain.T)\n new_covariance = covariance - np.linalg.multi_dot((\n kalman_gain, projected_cov, kalman_gain.T))\n return new_mean, new_covariance\n\n def gating_distance(self, mean, covariance, measurements,\n only_position=False, metric='maha'):\n \"\"\"Compute gating distance between state distribution and measurements.\n A suitable distance threshold can be obtained from `chi2inv95`. If\n `only_position` is False, the chi-square distribution has 4 degrees of\n freedom, otherwise 2.\n Parameters\n ----------\n mean : ndarray\n Mean vector over the state distribution (8 dimensional).\n covariance : ndarray\n Covariance of the state distribution (8x8 dimensional).\n measurements : ndarray\n An Nx4 dimensional matrix of N measurements, each in\n format (x, y, a, h) where (x, y) is the bounding box center\n position, a the aspect ratio, and h the height.\n only_position : Optional[bool]\n If True, distance computation is done with respect to the bounding\n box center position only.\n Returns\n -------\n ndarray\n Returns an array of length N, where the i-th element contains the\n squared Mahalanobis distance between (mean, covariance) and\n `measurements[i]`.\n \"\"\"\n mean, covariance = self.project(mean, covariance)\n if only_position:\n mean, covariance = mean[:2], covariance[:2, :2]\n measurements = measurements[:, :2]\n\n d = measurements - mean\n if metric == 'gaussian':\n return np.sum(d * d, axis=1)\n elif metric == 'maha':\n cholesky_factor = np.linalg.cholesky(covariance)\n z = scipy.linalg.solve_triangular(\n cholesky_factor, d.T, lower=True, check_finite=False,\n overwrite_b=True)\n squared_maha = np.sum(z * z, axis=0)\n return squared_maha\n else:\n raise ValueError('invalid distance metric')"
},
{
"identifier": "GMC",
"path": "yolox/tracker/gmc.py",
"snippet": "class GMC:\n def __init__(self, method='orb', downscale=2, verbose=None):\n super(GMC, self).__init__()\n\n self.method = method\n self.downscale = max(1, int(downscale))\n\n if self.method == 'orb':\n self.detector = cv2.FastFeatureDetector_create(20)\n self.extractor = cv2.ORB_create()\n self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)\n seqName = verbose[0]\n fileDir = verbose[1]\n\n if '-FRCNN' in seqName:\n seqName = seqName[:-6]\n elif '-DPM' in seqName:\n seqName = seqName[:-4]\n elif '-SDP' in seqName:\n seqName = seqName[:-4]\n \n self.gmcFile = open(f\"yolox/tracker/GMC_files/{fileDir}/GMC-{seqName}.txt\", 'w+')\n\n elif self.method == 'sift':\n self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)\n self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)\n self.matcher = cv2.BFMatcher(cv2.NORM_L2)\n\n elif self.method == 'ecc':\n number_of_iterations = 5000\n termination_eps = 1e-6\n self.warp_mode = cv2.MOTION_EUCLIDEAN\n self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)\n\n elif self.method == 'file' or self.method == 'files':\n seqName = verbose[0]\n # MOT17_ablation, MOTChallenge, VisDrone/test-dev, BDD100K/val, BDD100K/test\n fileDir = verbose[1]\n filePath = f'yolox/tracker/GMC_files/{fileDir}'\n\n if '-FRCNN' in seqName:\n seqName = seqName[:-6]\n elif '-DPM' in seqName:\n seqName = seqName[:-4]\n elif '-SDP' in seqName:\n seqName = seqName[:-4]\n\n self.gmcFile = open(filePath + \"/GMC-\" + seqName + \".txt\", 'r')\n\n if self.gmcFile is None:\n raise ValueError(\"Error: Unable to open GMC file in directory:\" + filePath)\n elif self.method == 'none' or self.method == 'None':\n self.method = 'none'\n else:\n raise ValueError(\"Error: Unknown CMC method:\" + method)\n\n self.prevFrame = None\n self.prevKeyPoints = None\n self.prevDescriptors = None\n\n self.initializedFirstFrame = False\n self.frameCnt = 0\n\n def apply(self, raw_frame, detections=None):\n if self.method == 'orb' or self.method == 'sift':\n try:\n H = self.applyFeaures(raw_frame, detections)\n except:\n H = np.array([[1., 0., 0.], [0., 1., 0.]])\n self.gmcFile.write('%d\\t%.6f\\t%.6f\\t%.6f\\t%.6f\\t%.6f\\t%.6f\\t\\n' % \\\n (self.frameCnt, H[0, 0], H[0, 1], H[0, 2], H[1, 0], H[1, 1], H[1, 2]))\n self.frameCnt += 1\n return H\n elif self.method == 'ecc':\n return self.applyEcc(raw_frame, detections)\n elif self.method == 'file':\n return self.applyFile(raw_frame, detections)\n elif self.method == 'none':\n return np.eye(2, 3)\n else:\n return np.eye(2, 3)\n\n def applyEcc(self, raw_frame, detections=None):\n\n # Initialize\n height, width, _ = raw_frame.shape\n frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)\n H = np.eye(2, 3, dtype=np.float32)\n\n # Downscale image (TODO: consider using pyramids)\n if self.downscale > 1.0:\n frame = cv2.GaussianBlur(frame, (3, 3), 1.5)\n frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))\n width = width // self.downscale\n height = height // self.downscale\n\n # Handle first frame\n if not self.initializedFirstFrame:\n # Initialize data\n self.prevFrame = frame.copy()\n\n # Initialization done\n self.initializedFirstFrame = True\n\n return H\n\n # Run the ECC algorithm. 
The results are stored in warp_matrix.\n # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)\n try:\n (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)\n except:\n print('Warning: find transform failed. Set warp as identity')\n\n return H\n\n def applyFeaures(self, raw_frame, detections=None):\n\n # Initialize\n height, width, _ = raw_frame.shape\n frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)\n H = np.eye(2, 3)\n\n # Downscale image (TODO: consider using pyramids)\n if self.downscale > 1.0:\n # frame = cv2.GaussianBlur(frame, (3, 3), 1.5)\n frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))\n width = width // self.downscale\n height = height // self.downscale\n\n # find the keypoints\n mask = np.zeros_like(frame)\n # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255\n mask[int(0.02 * height): int(0.98 * height), int(0.02 * width): int(0.98 * width)] = 255\n if detections is not None:\n for det in detections:\n tlbr = (det[:4] / self.downscale).astype(np.int_)\n mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0\n\n keypoints = self.detector.detect(frame, mask)\n\n # compute the descriptors\n keypoints, descriptors = self.extractor.compute(frame, keypoints)\n\n # Handle first frame\n if not self.initializedFirstFrame:\n # Initialize data\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n # Initialization done\n self.initializedFirstFrame = True\n\n return H\n\n # Match descriptors.\n knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)\n\n # Filtered matches based on smallest spatial distance\n matches = []\n spatialDistances = []\n\n maxSpatialDistance = 0.25 * np.array([width, height])\n\n # Handle empty matches case\n if len(knnMatches) == 0:\n # Store to next iteration\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n return H\n\n for m, n in knnMatches:\n if m.distance < 0.9 * n.distance:\n prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt\n currKeyPointLocation = keypoints[m.trainIdx].pt\n\n spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],\n prevKeyPointLocation[1] - currKeyPointLocation[1])\n\n if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \\\n (np.abs(spatialDistance[1]) < maxSpatialDistance[1]):\n spatialDistances.append(spatialDistance)\n matches.append(m)\n\n meanSpatialDistances = np.mean(spatialDistances, 0)\n stdSpatialDistances = np.std(spatialDistances, 0)\n\n inliesrs = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances\n\n goodMatches = []\n prevPoints = []\n currPoints = []\n for i in range(len(matches)):\n if inliesrs[i, 0] and inliesrs[i, 1]:\n goodMatches.append(matches[i])\n prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)\n currPoints.append(keypoints[matches[i].trainIdx].pt)\n\n prevPoints = np.array(prevPoints)\n currPoints = np.array(currPoints)\n\n # Draw the keypoint matches on the output image\n if 0:\n matches_img = np.hstack((self.prevFrame, frame))\n matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)\n W = np.size(self.prevFrame, 1)\n for m in goodMatches:\n prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)\n curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)\n curr_pt[0] += W\n color = np.random.randint(0, 255, (3,))\n color = 
(int(color[0]), int(color[1]), int(color[2]))\n\n matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)\n matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)\n matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)\n\n plt.figure()\n plt.imshow(matches_img)\n plt.show()\n\n # Find rigid matrix\n if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)):\n H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)\n\n # Handle downscale\n if self.downscale > 1.0:\n H[0, 2] *= self.downscale\n H[1, 2] *= self.downscale\n else:\n print('Warning: not enough matching points')\n\n # Store to next iteration\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n return H\n\n def applyFile(self, raw_frame=None, detections=None):\n line = self.gmcFile.readline()\n tokens = line.split(\"\\t\")\n H = np.eye(2, 3, dtype=np.float_)\n if len(tokens) > 6:\n H[0, 0] = float(tokens[1])\n H[0, 1] = float(tokens[2])\n H[0, 2] = float(tokens[3])\n H[1, 0] = float(tokens[4])\n H[1, 1] = float(tokens[5])\n H[1, 2] = float(tokens[6])\n\n return H"
}
] | import numpy as np
from collections import deque
from .basetrack import BaseTrack, TrackState
from .kalman_filter import KalmanFilter
from .gmc import GMC
from . import matching | 7,874 | # @jit(nopython=True)
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
# ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
# @jit(nopython=True)
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@property
def xywh(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2.0
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
@staticmethod
def tlwh_to_xywh(tlwh):
"""Convert bounding box to format `(center x, center y, width,
height)`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
return ret
def to_xyah(self):
return self.tlwh_to_xyah(self.tlwh)
def to_xywh(self):
return self.tlwh_to_xywh(self.tlwh)
@staticmethod
# @jit(nopython=True)
def tlbr_to_tlwh(tlbr):
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_tlbr(tlwh):
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
def __repr__(self):
return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
class DefaultArgs(object):
def __init__(self, mot20=False):
self.track_thresh = 0.6
self.low_thresh = 0.1
self.track_buffer = 30
self.match_thresh = 0.8
self.mot20 = mot20
self.mask_emb_with_iou = True
self.fuse_emb_and_iou = 'min'
self.cmc_method = 'none'
self.cmc_seq_name = ''
self.cmc_file_dir = ''
class U2MOTTracker(object):
def __init__(self, args, frame_rate=30):
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
BaseTrack.clear_count()
self.frame_id = 0
# self.args = args
self.mot20 = args.mot20
# self.det_thresh = args.track_thresh + 0.1
self.track_high_thresh = args.track_thresh
self.track_low_thresh = args.low_thresh
self.new_track_thresh = args.track_thresh + 0.1
self.match_thresh = args.match_thresh
self.buffer_size = int(frame_rate / 30.0 * args.track_buffer)
self.max_time_lost = self.buffer_size
self.kalman_filter = KalmanFilter()
# ReID module
self.iou_only = False
self.mask_emb_with_iou = args.mask_emb_with_iou # default True
self.fuse_emb_and_iou = args.fuse_emb_and_iou # default min, choice: [min, mean]
self.proximity_thresh = 0.5
self.appearance_thresh = 0.25
| #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
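# STrack: a single tracked target holding a Kalman-filtered (cx, cy, w, h) state, a per-class score history, and an EMA-smoothed ReID appearance embedding.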
class STrack(BaseTrack):
shared_kalman = KalmanFilter()
def __init__(self, tlwh, score, cls=0, feat=None, feat_history=50):
# wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float64)  # np.float was removed in NumPy >= 1.24; use an explicit float64 dtype
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.cls = -1
self.cls_hist = [] # (cls id, freq)
self.update_cls(cls, score)
self.score = score
self.tracklet_len = 0
self.smooth_feat = None
self.curr_feat = None
self.features = deque([], maxlen=feat_history)
if feat is not None:
self.update_features(feat)
self.alpha = 0.9
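    # Maintain an L2-normalized appearance embedding via exponential moving average (alpha = 0.9).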
def update_features(self, feat):
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def update_cls(self, cls, score):
if len(self.cls_hist) > 0:
max_freq = 0
found = False
for c in self.cls_hist:
if cls == c[0]:
c[1] += score
found = True
if c[1] > max_freq:
max_freq = c[1]
self.cls = c[0]
if not found:
self.cls_hist.append([cls, score])
self.cls = cls
else:
self.cls_hist.append([cls, score])
self.cls = cls
def predict(self):
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[6] = 0
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
@staticmethod
def multi_predict(stracks):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][6] = 0
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
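    # Camera-motion compensation: warp each track's Kalman mean and covariance by the 2x3 affine matrix H estimated by GMC.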
@staticmethod
def multi_gmc(stracks, H=np.eye(2, 3)):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
R = H[:2, :2]
R8x8 = np.kron(np.eye(4, dtype=float), R)
t = H[:2, 2]
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
mean = R8x8.dot(mean)
mean[:2] += t
cov = R8x8.dot(cov).dot(R8x8.transpose())
stracks[i].mean = mean
stracks[i].covariance = cov
def activate(self, kalman_filter, frame_id):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
self.track_id = self.next_id()
# self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xywh(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
if frame_id == 1:
self.is_activated = True
# self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
# self.mean, self.covariance = self.kalman_filter.update(
# self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
# )
self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xywh(new_track.tlwh))
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
self.score = new_track.score
self.update_cls(new_track.cls, new_track.score)
def update(self, new_track, frame_id):
"""
Update a matched track
:type new_track: STrack
:type frame_id: int
:return:
"""
self.frame_id = frame_id
self.tracklet_len += 1
new_tlwh = new_track.tlwh
# self.mean, self.covariance = self.kalman_filter.update(
# self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xywh(new_tlwh))
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
self.update_cls(new_track.cls, new_track.score)
@property
# @jit(nopython=True)
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
# ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
# @jit(nopython=True)
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@property
def xywh(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2.0
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
@staticmethod
def tlwh_to_xywh(tlwh):
"""Convert bounding box to format `(center x, center y, width,
height)`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
return ret
def to_xyah(self):
return self.tlwh_to_xyah(self.tlwh)
def to_xywh(self):
return self.tlwh_to_xywh(self.tlwh)
@staticmethod
# @jit(nopython=True)
def tlbr_to_tlwh(tlbr):
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_tlbr(tlwh):
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
def __repr__(self):
return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
class DefaultArgs(object):
def __init__(self, mot20=False):
self.track_thresh = 0.6
self.low_thresh = 0.1
self.track_buffer = 30
self.match_thresh = 0.8
self.mot20 = mot20
self.mask_emb_with_iou = True
self.fuse_emb_and_iou = 'min'
self.cmc_method = 'none'
self.cmc_seq_name = ''
self.cmc_file_dir = ''
class U2MOTTracker(object):
def __init__(self, args, frame_rate=30):
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
BaseTrack.clear_count()
self.frame_id = 0
# self.args = args
self.mot20 = args.mot20
# self.det_thresh = args.track_thresh + 0.1
self.track_high_thresh = args.track_thresh
self.track_low_thresh = args.low_thresh
self.new_track_thresh = args.track_thresh + 0.1
self.match_thresh = args.match_thresh
self.buffer_size = int(frame_rate / 30.0 * args.track_buffer)
self.max_time_lost = self.buffer_size
self.kalman_filter = KalmanFilter()
# ReID module
self.iou_only = False
self.mask_emb_with_iou = args.mask_emb_with_iou # default True
self.fuse_emb_and_iou = args.fuse_emb_and_iou # default min, choice: [min, mean]
self.proximity_thresh = 0.5
self.appearance_thresh = 0.25
| self.gmc = GMC(method=args.cmc_method, | 3 | 2023-12-18 10:04:40+00:00 | 12k |
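The STrack.multi_gmc method in the record above compensates every track's Kalman state for camera motion: the warp's 2x2 rotation/scale block is applied to all four (x, y)-like pairs of the 8-D state via a Kronecker product, and the translation is added to the position only. A minimal standalone sketch of that step, assuming a NumPy state [x, y, w, h, vx, vy, vw, vh] (the helper name is illustrative, not from the repo):

import numpy as np

def apply_gmc(mean, cov, H=np.eye(2, 3)):
    # H is a 2x3 affine warp estimated by the camera-motion-compensation module
    R = H[:2, :2]                    # rotation/scale part of the warp
    R8x8 = np.kron(np.eye(4), R)     # act on every (x, y)-style pair of the 8-D state
    t = H[:2, 2]                     # translation, applied to the position only
    new_mean = R8x8 @ mean
    new_mean[:2] += t
    new_cov = R8x8 @ cov @ R8x8.T    # push the covariance through the linear map
    return new_mean, new_cov

# identity warp leaves the state unchanged
m, P = apply_gmc(np.zeros(8), np.eye(8))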
liuhuang31/HiFTNet-sr | train.py | [
{
"identifier": "AttrDict",
"path": "env.py",
"snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self"
},
{
"identifier": "build_env",
"path": "env.py",
"snippet": "def build_env(config, config_name, path):\n t_path = os.path.join(path, config_name)\n if config != t_path:\n os.makedirs(path, exist_ok=True)\n shutil.copyfile(config, os.path.join(path, config_name))"
},
{
"identifier": "MelDataset",
"path": "meldataset.py",
"snippet": "class MelDataset(torch.utils.data.Dataset):\n def __init__(self, training_files, segment_size, n_fft, num_mels,\n hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,\n device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):\n self.audio_files = training_files\n random.seed(1234)\n if shuffle:\n random.shuffle(self.audio_files)\n self.segment_size = segment_size\n self.sampling_rate = sampling_rate\n self.split = split\n self.n_fft = n_fft\n self.num_mels = num_mels\n self.hop_size = hop_size\n self.win_size = win_size\n self.fmin = fmin\n self.fmax = fmax\n self.fmax_loss = fmax_loss\n self.cached_wav = None\n self.n_cache_reuse = n_cache_reuse\n self._cache_ref_count = 0\n self.device = device\n self.fine_tuning = fine_tuning\n self.base_mels_path = base_mels_path\n\n def __getitem__(self, index):\n filename = self.audio_files[index]\n if self._cache_ref_count == 0:\n audio, sampling_rate = load_wav(filename, self.sampling_rate)\n # audio = audio / MAX_WAV_VALUE\n if not self.fine_tuning:\n audio = normalize(audio) * 0.95\n self.cached_wav = audio\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n self._cache_ref_count = self.n_cache_reuse\n else:\n audio = self.cached_wav\n self._cache_ref_count -= 1\n\n audio = torch.FloatTensor(audio)\n audio = audio.unsqueeze(0)\n\n if not self.fine_tuning:\n if self.split:\n if audio.size(1) >= self.segment_size:\n max_audio_start = audio.size(1) - self.segment_size\n audio_start = random.randint(0, max_audio_start)\n audio = audio[:, audio_start:audio_start+self.segment_size]\n else:\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,\n center=False, training=True)\n else:\n mel = np.load(\n os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))\n mel = torch.from_numpy(mel)\n\n if len(mel.shape) < 3:\n mel = mel.unsqueeze(0)\n\n if self.split:\n frames_per_seg = math.ceil(self.segment_size / self.hop_size)\n\n if audio.size(1) >= self.segment_size:\n mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)\n mel = mel[:, :, mel_start:mel_start + frames_per_seg]\n audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]\n else:\n mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,\n center=False)\n\n return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())\n\n def __len__(self):\n return len(self.audio_files)"
},
{
"identifier": "mel_spectrogram",
"path": "meldataset.py",
"snippet": "def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, training=False):\n # if torch.min(y) < -1.:\n # print('min value is ', torch.min(y))\n # if torch.max(y) > 1.:\n # print('max value is ', torch.max(y))\n if training:\n with torch.no_grad():\n # 16k to 24k/48k\n if fmax <= 8000 and (sampling_rate == 24000 or sampling_rate == 48000):\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 16000)\n y = librosa.resample(y, 16000, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n # 24k to 48k\n elif fmax <= 12000 and sampling_rate == 48000:\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n else:\n pass\n\n global mel_basis, hann_window\n if fmax not in mel_basis:\n mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)\n mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)\n hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n # complex tensor as default, then use view_as_real for future pytorch compatibility\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)\n spec = torch.view_as_real(spec)\n spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))\n\n spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "get_dataset_filelist",
"path": "meldataset.py",
"snippet": "def get_dataset_filelist(a):\n training_files =[]\n validation_files =[]\n total_files = 0\n input_wave_dirs = a.input_wavs_dir.split(\",\")\n\n for wave_dir in input_wave_dirs:\n num_validation_files = 3\n files_under_path = 0\n allfiles = find_all_wav_path(wave_dir)\n for input_file_name in allfiles:\n if not os.path.splitext(input_file_name)[-1] == '.wav':\n continue\n files_under_path +=1\n full_file_path = input_file_name\n if num_validation_files <=0:\n training_files.append(full_file_path)\n else:\n validation_files.append(full_file_path)\n num_validation_files -=1\n if files_under_path == 0:\n raise Exception(\"no wave file found!\")\n total_files +=files_under_path\n print(f'total files:{total_files}')\n \n return training_files, validation_files"
},
{
"identifier": "Generator",
"path": "models.py",
"snippet": "class Generator(torch.nn.Module):\n def __init__(self, h, F0_model):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.m_source = SourceModuleHnNSF(\n sampling_rate=h.sampling_rate,\n upsample_scale=np.prod(h.upsample_rates) * h.gen_istft_hop_size,\n harmonic_num=8, voiced_threshod=10)\n self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h.upsample_rates) * h.gen_istft_hop_size)\n self.noise_convs = nn.ModuleList()\n self.noise_res = nn.ModuleList()\n \n self.F0_model = F0_model\n \n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),\n k, u, padding=(k-u)//2)))\n\n c_cur = h.upsample_initial_channel // (2 ** (i + 1))\n \n if i + 1 < len(h.upsample_rates): #\n stride_f0 = np.prod(h.upsample_rates[i + 1:])\n self.noise_convs.append(Conv1d(\n h.gen_istft_n_fft + 2, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2))\n self.noise_res.append(resblock(h, c_cur, 7, [1,3,5]))\n else:\n self.noise_convs.append(Conv1d(h.gen_istft_n_fft + 2, c_cur, kernel_size=1))\n self.noise_res.append(resblock(h, c_cur, 11, [1,3,5]))\n \n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = h.upsample_initial_channel//(2**(i+1))\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.post_n_fft = h.gen_istft_n_fft\n self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))\n self.stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft)\n\n def forward(self, x):\n f0, _, _ = self.F0_model(x.unsqueeze(1))\n if len(f0.shape) == 1:\n f0 = f0.unsqueeze(0)\n \n f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t\n\n har_source, _, _ = self.m_source(f0)\n har_source = har_source.transpose(1, 2).squeeze(1)\n har_spec, har_phase = self.stft.transform(har_source)\n har = torch.cat([har_spec, har_phase], dim=1)\n \n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x_source = self.noise_convs[i](har)\n x_source = self.noise_res[i](x_source)\n \n x = self.ups[i](x)\n if i == self.num_upsamples - 1:\n x = self.reflection_pad(x)\n \n x = x + x_source\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i*self.num_kernels+j](x)\n else:\n xs += self.resblocks[i*self.num_kernels+j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :])\n phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])\n\n return spec, phase\n\n def remove_weight_norm(self):\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiPeriodDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n DiscriminatorP(2),\n DiscriminatorP(3),\n DiscriminatorP(5),\n DiscriminatorP(7),\n DiscriminatorP(11),\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "MultiResSpecDiscriminator",
"path": "models.py",
"snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "feature_loss",
"path": "models.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss*2"
},
{
"identifier": "generator_loss",
"path": "models.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "discriminator_loss",
"path": "models.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "discriminator_TPRLS_loss",
"path": "models.py",
"snippet": "def discriminator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n tau = 0.04\n m_DG = torch.median((dr-dg))\n L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])\n loss += tau - F.relu(tau - L_rel)\n return loss"
},
{
"identifier": "generator_TPRLS_loss",
"path": "models.py",
"snippet": "def generator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n for dg, dr in zip(disc_real_outputs, disc_generated_outputs):\n tau = 0.04\n m_DG = torch.median((dr-dg))\n L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])\n loss += tau - F.relu(tau - L_rel)\n return loss"
},
{
"identifier": "plot_spectrogram",
"path": "utils.py",
"snippet": "def plot_spectrogram(spectrogram):\n fig, ax = plt.subplots(figsize=(10, 2))\n im = ax.imshow(spectrogram, aspect=\"auto\", origin=\"lower\",\n interpolation='none')\n plt.colorbar(im, ax=ax)\n\n fig.canvas.draw()\n plt.close()\n\n return fig"
},
{
"identifier": "scan_checkpoint",
"path": "utils.py",
"snippet": "def scan_checkpoint(cp_dir, prefix):\n pattern = os.path.join(cp_dir, prefix + '????????')\n cp_list = glob.glob(pattern)\n if len(cp_list) == 0:\n return None\n return sorted(cp_list)[-1]"
},
{
"identifier": "load_checkpoint",
"path": "utils.py",
"snippet": "def load_checkpoint(filepath, device):\n assert os.path.isfile(filepath)\n print(\"Loading '{}'\".format(filepath))\n checkpoint_dict = torch.load(filepath, map_location=device)\n print(\"Complete.\")\n return checkpoint_dict"
},
{
"identifier": "save_checkpoint",
"path": "utils.py",
"snippet": "def save_checkpoint(filepath, obj):\n print(\"Saving checkpoint to {}\".format(filepath))\n torch.save(obj, filepath)\n print(\"Complete.\")"
},
{
"identifier": "TorchSTFT",
"path": "stft.py",
"snippet": "class TorchSTFT(torch.nn.Module):\n def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):\n super().__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))\n\n def transform(self, input_data):\n forward_transform = torch.stft(\n input_data,\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device),\n return_complex=True)\n\n return torch.abs(forward_transform), torch.angle(forward_transform)\n\n def inverse(self, magnitude, phase):\n inverse_transform = torch.istft(\n magnitude * torch.exp(phase * 1j),\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))\n\n return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation\n\n def forward(self, input_data):\n self.magnitude, self.phase = self.transform(input_data)\n reconstruction = self.inverse(self.magnitude, self.phase)\n return reconstruction"
},
{
"identifier": "JDCNet",
"path": "Utils/JDC/model.py",
"snippet": "class JDCNet(nn.Module):\n \"\"\"\n Joint Detection and Classification Network model for singing voice melody.\n \"\"\"\n def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):\n super().__init__()\n self.num_class = num_class\n\n # input = (b, 1, 31, 513), b = batch size\n self.conv_block = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)\n nn.BatchNorm2d(num_features=64),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)\n )\n\n # res blocks\n self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)\n self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)\n self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)\n\n # pool block\n self.pool_block = nn.Sequential(\n nn.BatchNorm2d(num_features=256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)\n nn.Dropout(p=0.2),\n )\n\n # maxpool layers (for auxiliary network inputs)\n # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)\n self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))\n # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)\n self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))\n # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)\n self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))\n\n # in = (b, 640, 31, 2), out = (b, 256, 31, 2)\n self.detector_conv = nn.Sequential(\n nn.Conv2d(640, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Dropout(p=0.2),\n )\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_classifier = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_detector = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b * 31, 512)\n self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)\n\n # input: (b * 31, 512)\n self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier\n\n # initialize weights\n self.apply(self.init_weights)\n\n def get_feature_GAN(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return poolblock_out.transpose(-1, -2)\n \n def get_feature(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return self.pool_block[2](poolblock_out)\n \n def forward(self, x):\n \"\"\"\n Returns:\n classification_prediction, detection_prediction\n sizes: (b, 31, 722), (b, 31, 2)\n \"\"\"\n ###############################\n # forward pass for classifier #\n ###############################\n seq_len = x.shape[-1]\n x = x.float().transpose(-1, -2)\n \n convblock_out = 
self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n \n \n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n GAN_feature = poolblock_out.transpose(-1, -2)\n poolblock_out = self.pool_block[2](poolblock_out)\n \n # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)\n classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))\n classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states\n\n classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512)\n classifier_out = self.classifier(classifier_out)\n classifier_out = classifier_out.view((-1, seq_len, self.num_class)) # (b, 31, num_class)\n \n # sizes: (b, 31, 722), (b, 31, 2)\n # classifier output consists of predicted pitch classes per frame\n # detector output consists of: (isvoice, notvoice) estimates per frame\n return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out\n\n @staticmethod\n def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.kaiming_uniform_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):\n for p in m.parameters():\n if p.data is None:\n continue\n\n if len(p.shape) >= 2:\n nn.init.orthogonal_(p.data)\n else:\n nn.init.normal_(p.data)"
}
] | import warnings
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from env import AttrDict, build_env
from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist
from models import Generator, MultiPeriodDiscriminator, MultiResSpecDiscriminator, feature_loss, generator_loss,\
discriminator_loss, discriminator_TPRLS_loss, generator_TPRLS_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint
from stft import TorchSTFT
from Utils.JDC.model import JDCNet | 7,449 | warnings.simplefilter(action='ignore', category=FutureWarning)
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
if h.num_gpus > 1:
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda:{:d}'.format(rank))
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(h.F0_path)['model']
F0_model.load_state_dict(params)
generator = Generator(h, F0_model).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiResSpecDiscriminator().to(device)
stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft).to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
state_dict_g = load_checkpoint(cp_g, device)
state_dict_do = load_checkpoint(cp_do, device)
generator.load_state_dict(state_dict_g['generator'])
mpd.load_state_dict(state_dict_do['mpd'])
msd.load_state_dict(state_dict_do['msd'])
steps = state_dict_do['steps'] + 1
last_epoch = state_dict_do['epoch']
if h.num_gpus > 1:
generator = DistributedDataParallel(generator, device_ids=[rank], find_unused_parameters=True).to(device)
mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)
optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),
h.learning_rate, betas=[h.adam_b1, h.adam_b2])
if state_dict_do is not None:
optim_g.load_state_dict(state_dict_do['optim_g'])
optim_d.load_state_dict(state_dict_do['optim_d'])
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
training_filelist, validation_filelist = get_dataset_filelist(a)
| warnings.simplefilter(action='ignore', category=FutureWarning)
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
if h.num_gpus > 1:
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda:{:d}'.format(rank))
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(h.F0_path)['model']
F0_model.load_state_dict(params)
generator = Generator(h, F0_model).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiResSpecDiscriminator().to(device)
stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft).to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
state_dict_g = load_checkpoint(cp_g, device)
state_dict_do = load_checkpoint(cp_do, device)
generator.load_state_dict(state_dict_g['generator'])
mpd.load_state_dict(state_dict_do['mpd'])
msd.load_state_dict(state_dict_do['msd'])
steps = state_dict_do['steps'] + 1
last_epoch = state_dict_do['epoch']
if h.num_gpus > 1:
generator = DistributedDataParallel(generator, device_ids=[rank], find_unused_parameters=True).to(device)
mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)
optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),
h.learning_rate, betas=[h.adam_b1, h.adam_b2])
if state_dict_do is not None:
optim_g.load_state_dict(state_dict_do['optim_g'])
optim_d.load_state_dict(state_dict_do['optim_d'])
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
training_filelist, validation_filelist = get_dataset_filelist(a)
| trainset = MelDataset(training_filelist, h.segment_size, h.n_fft, h.num_mels, | 2 | 2023-12-16 03:53:55+00:00 | 12k |
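The mel_spectrogram helper shown in this record reflect-pads the waveform, takes an STFT, projects the magnitudes through a librosa mel filterbank, and applies log compression. A condensed sketch of that pipeline, with illustrative parameter values (not taken from the repo's config files):

import torch
from librosa.filters import mel as librosa_mel_fn

def simple_mel(y, sr=22050, n_fft=1024, hop=256, win=1024, n_mels=80, fmin=0, fmax=8000):
    # mel filterbank of shape (n_mels, n_fft // 2 + 1)
    mel_basis = torch.from_numpy(
        librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)).float()
    pad = (n_fft - hop) // 2
    y = torch.nn.functional.pad(y.unsqueeze(1), (pad, pad), mode='reflect').squeeze(1)
    spec = torch.stft(y, n_fft, hop_length=hop, win_length=win,
                      window=torch.hann_window(win), center=False, return_complex=True)
    mag = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9)          # magnitude
    return torch.log(torch.clamp(torch.matmul(mel_basis, mag), min=1e-5))     # log-mel

mel = simple_mel(torch.randn(1, 22050))   # -> (1, 80, n_frames)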
UnbSky/Hanabi-AI-Assitant | main_connect.py | [
{
"identifier": "AIWindow",
"path": "game_ui.py",
"snippet": "class AIWindow(QMainWindow, Ui_AIUI):\n def __init__(self, url, cookie, model_data=None):\n super().__init__()\n self.setupUi(self)\n #self.setFixedSize(1300, 1200)\n self.setWindowTitle(\"HanabiAIAssitant\")\n\n self.nextstep_btn.setEnabled(False)\n self.prevstep_btn.setEnabled(False)\n self.openhistory_btn.setEnabled(False)\n\n #连接服务器的线程\n self.worker_thread = ClientThread(url, cookie)\n self.worker_thread.game_over_sig.connect(self.game_over)\n self.worker_thread.update_table_ui_sig.connect(self.update_table_info)\n self.worker_thread.handle_action_sig.connect(self.handle_action)\n self.worker_thread.ws_load_sig.connect(self.ws_load)\n self.worker_thread.game_start_sig.connect(self.game_start)\n self.worker_thread.table_joined_sig.connect(self.table_joined)\n\n self.enable_active_btn(False)\n self.game_controller = GameController(model_data)\n self.current_loss_card = None\n self.support_variant = [\n \"No Variant\", \"6 Suits\", \"Black (5 Suits)\", \"Black (6 Suits)\", \"Rainbow (5 Suits)\", \"Rainbow (6 Suits)\",\n \"Brown (5 Suits)\", \"Brown (6 Suits)\", \"Dark Rainbow (6 Suits)\", \"White (5 Suits)\", \"White (6 Suits)\",\n \"Pink (5 Suits)\", \"Pink (6 Suits)\", \"Gray (6 Suits)\"\n ]\n\n self.play_btn.clicked.connect(self.play_clicked)\n self.discard_btn.clicked.connect(self.discard_clicked)\n self.clue_btn.clicked.connect(self.clue_clicked)\n self.leave_btn.clicked.connect(self.leave_table_clicked)\n self.draw_state = False #发牌状态\n self.in_table = False\n\n self.nextstep_btn.clicked.connect(self.next_history_clicked)\n self.prevstep_btn.clicked.connect(self.prev_history_clicked)\n self.openhistory_btn.clicked.connect(self.open_history_clicked)\n\n self.tables = {}\n self.room_label.setWordWrap(3)\n\n #连接服务器\n self.worker_thread.start()\n #0表示在房间里,1表示等待中,2表示游戏中\n self.in_room_status = 0\n\n def send(self, command, data):\n if not isinstance(data, dict):\n data = {}\n self.ws.send(command + \" \" + json.dumps(data))\n print('debug: sent command \"' + command + '\"')\n\n def table_joined(self, data):\n tableID = data['tableID']\n table_info = self.tables[tableID]\n table_id = table_info[\"id\"]\n numPlayers = table_info[\"numPlayers\"]\n variant = table_info[\"variant\"]\n players = table_info[\"players\"]\n name = table_info[\"name\"]\n self.table_id = tableID\n table_str = f\"{name}\\n 在房间中 ID:{table_id} P:{numPlayers} \\n模式:{variant} \\n 玩家:{players}\"\n self.room_label.setText(table_str)\n self.in_room_status = 1\n self.update_table_info(self.tables)\n\n def game_over(self, data):\n table_str = f\"游戏结束了,点击退出离开房间\"\n self.room_label.setText(table_str)\n\n def handle_action(self, data):\n #游戏状态有以下几种\n #draw\n #clue-staus-turn\n #play-draw-status-turn\n #discard-draw-status-turn\n try:\n if data[\"type\"] == \"draw\":\n self.init_draw_round -= 1\n self.game_controller.online_handle_draw(data)\n self.update_all_game_info()\n #游戏开始了,唤醒一下\n if self.init_draw_round == 0:\n self.call_next_round(0)\n\n elif data[\"type\"] == \"play\":\n action_str = self.game_controller.online_handle_play(data)\n self.update_all_game_info()\n self.online_action_list.append(action_str)\n\n elif data[\"type\"] == \"discard\":\n action_str = self.game_controller.online_handle_discard(data)\n self.update_all_game_info()\n self.online_action_list.append(action_str)\n\n elif data[\"type\"] == \"clue\":\n action_str = self.game_controller.online_handle_clue(data)\n self.update_all_game_info()\n for clue_r in self.clue_replace:\n if clue_r in action_str:\n action_str = action_str.replace(clue_r, 
self.clue_replace[clue_r])\n break\n self.online_action_list.append(action_str)\n\n elif data[\"type\"] == \"turn\":\n pid = data[\"currentPlayerIndex\"]\n self.call_next_round(pid)\n self.update_all_game_info()\n\n elif data[\"type\"] == \"status\":\n self.game_controller.online_handle_status(data)\n self.update_game_state()\n else:\n print(data)\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n def ws_load(self, ws):\n self.ws = ws[\"ws\"]\n\n def game_start(self, data):\n print(data)\n tableID = data[\"tableID\"]\n self.clear_UI()\n try:\n #基础游戏设置\n self.clue_replace = {\n \"I0\": '红色(I0)',\n \"I1\": '黄色(I1)',\n \"I2\": '绿色(I2)',\n \"I3\": '蓝色(I3)',\n \"I4\": '紫色(I4)',\n \"I5\": '青色(I5)',\n \"R1\": '数字1(R1)',\n \"R2\": '数字2(R2)',\n \"R3\": '数字3(R3)',\n \"R4\": '数字4(R4)',\n \"R5\": '数字5(R5)',\n }\n colors = [\n (255, 182, 193), # 淡红\n (255, 255, 224), # 淡黄\n (144, 238, 144), # 淡绿\n (173, 216, 230), # 淡蓝\n (221, 160, 221), # 淡紫\n (173, 216, 230) # 淡青\n ]\n self.index_to_color = [f\"background-color: rgb{color}\" for color in colors]\n\n table_info = self.tables[tableID]\n table_id = table_info[\"id\"]\n numPlayers = table_info[\"numPlayers\"]\n variant = table_info[\"variant\"]\n players = table_info[\"players\"]\n name = table_info[\"name\"]\n table_str = f\"{name}\\n 游戏开始 ID:{table_id} P:{numPlayers} \\n模式:{variant} \\n 玩家:{players}\"\n\n self.room_label.setText(table_str)\n self.in_room_status = 2\n self.update_table_info(self.tables)\n self.online_action_list = []\n self.active_pid = 0\n self.random_start = False\n self.server_game = True\n self.table_id = data[\"tableID\"]\n self.game_actions = []\n self.player_count = data[\"options\"][\"numPlayers\"]\n self.spectating = data[\"spectating\"]\n self.playerNames = data[\"playerNames\"]\n if self.player_count <= 3:\n self.card_count = 5\n else:\n self.card_count = 4\n self.varient_name = data[\"options\"][\"variantName\"]\n self.init_draw_round = self.card_count * self.player_count\n\n self.AI_pids = []\n #AI支持的玩法才会有AI预测\n if self.varient_name in self.support_variant:\n if self.spectating:\n for i in range(self.player_count):\n self.AI_pids.append(i)\n else:\n self.AI_pids = [data[\"ourPlayerIndex\"]]\n else:\n print(f\"Unsupported variant: {self.varient_name}\")\n\n game_args = dict(\n players=self.player_count,\n players_card=self.card_count,\n AIplayer=self.AI_pids,\n variant=self.varient_name,\n random_start=False,\n start_card=None,\n allow_drawback=False\n )\n\n gameconf = GameArgs(**game_args)\n self.game_controller.start_game(gameconf)\n special_dict = self.game_controller.special_dict\n if \"Dark Rainbow\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"暗彩虹\"\n self.index_to_color[special_dict.last_special_card] = \"background: qlineargradient(x1:0, y1:0, x2:1, y2:0, stop:0 #FF0000, stop:0.17 #FF7F00, stop:0.33 #FFFF00, stop:0.50 #00FF00, stop:0.67 #0000FF, stop:0.83 #4B0082, stop:1 #9400D3);\"\n elif \"Rainbow\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"彩虹\"\n self.index_to_color[special_dict.last_special_card] = \"background: qlineargradient(x1:0, y1:0, x2:1, y2:0, stop:0 #FFB6C1, stop:0.17 #FFE4C4, stop:0.33 #FFFFE0, stop:0.50 #98FB98, stop:0.67 #ADD8E6, stop:0.83 #E6E6FA, stop:1 #E3E3E3);\"\n elif \"Brown\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"棕色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(205, 133, 63)\"\n elif \"Black\" in 
self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"黑色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(64, 64, 64)\"\n elif \"White\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"白色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(250, 250, 250)\"\n elif \"Pink\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"粉色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(255, 182, 193)\"\n elif \"Gray\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"灰色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(220, 220, 220)\"\n self.setup_button_pannel(self.player_count)\n\n except Exception as e:\n print(\"ERROR:\", e)\n traceback.print_exc()\n return\n\n def open_history_clicked(self):\n options = QFileDialog.Options()\n options |= QFileDialog.ReadOnly\n file_name, _ = QFileDialog.getOpenFileName(self, 'Open File', '', 'JSON Files (*.json);;All Files (*)', options=options)\n if file_name:\n # 打开文件并读取内容\n with open(file_name, 'r') as file:\n try:\n # 解析JSON内容\n self.clear_UI()\n history_data = json.load(file)\n file_name = f\"{file_name}\"\n file_name = file_name.replace(\"ERROR_\",\"\")\n game_args = file_name.split(\"_\")\n print(game_args)\n fake_game_data = {\n \"tableID\": 1,\n \"spectating\": True,\n \"playerNames\": [\"AI0\",\"AI1\",\"AI2\",\"AI3\",\"AI4\",\"AI5\"],\n \"options\":{\n \"numPlayers\": int(game_args[1][0]),\n \"variantName\": game_args[0].split(\"/\")[-1],\n }\n }\n self.game_start(fake_game_data)\n self.current_history_index = 0\n self.game_controller.game_history = history_data\n\n action = self.game_controller.set_current_history(self.current_history_index)\n #print(\"Update History\")\n self.update_all_game_info()\n #print(\"update_all_game_info\")\n self.info_label.setText(f'选择操作: {action[\"str\"]}')\n #print(\"setText\")\n except Exception as e:\n print(f'Error reading history: {e}')\n traceback.print_exc()\n\n def next_history_clicked(self):\n if self.current_history_index < len(self.game_controller.game_history) - 1:\n self.current_history_index += 1\n action = self.game_controller.set_current_history(self.current_history_index)\n self.active_pid = self.game_controller.active_pid\n self.update_all_game_info()\n self.info_label.setText(f'选择操作: {action[\"str\"]}')\n\n def prev_history_clicked(self):\n if self.current_history_index > 0:\n self.current_history_index -= 1\n action = self.game_controller.set_current_history(self.current_history_index)\n self.active_pid = self.game_controller.active_pid\n self.update_all_game_info()\n self.info_label.setText(f'选择操作: {action[\"str\"]}')\n\n\n def ai_action_clicked(self, action_detail):\n act_type = action_detail[\"type\"]\n if act_type == \"play\":\n pid = action_detail[\"pid\"]\n pos = action_detail[\"pos\"]\n if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": ACTION.PLAY,\n \"target\": order,\n },\n )\n elif act_type == \"discard\":\n pid = action_detail[\"pid\"]\n pos = action_detail[\"pos\"]\n if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": 
ACTION.DISCARD,\n \"target\": order,\n },\n )\n elif act_type == \"clue\":\n from_pid = action_detail[\"from\"]\n to_pid = action_detail[\"to\"]\n clue_type = action_detail[\"clue_type\"]\n clue_value = action_detail[\"clue_value\"]\n if clue_type == 0:\n clue_type = ACTION.COLOR_CLUE\n else:\n clue_type = ACTION.RANK_CLUE\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": clue_type,\n \"target\": to_pid,\n \"value\": clue_value,\n },\n )\n\n def leave_table_clicked(self):\n try:\n if self.in_room_status == 2:\n #游戏已经开始,暴力退出\n self.send(\n \"tableUnattend\",\n {\n \"tableID\": self.table_id,\n },\n )\n elif self.in_room_status == 1:\n #游戏还没开始,退出房间\n self.send(\n \"tableLeave\",\n {\n \"tableID\": self.table_id,\n },\n )\n # 清空游戏相关的所有内容\n self.in_room_status = 0\n self.update_table_info(self.tables)\n self.info_label.setText(\"游戏未开始\")\n self.state_label.setText(\"无游戏\")\n self.room_label.setText(\"不在房间里\")\n self.clear_UI()\n except Exception:\n traceback.print_exc()\n\n def clear_UI(self):\n a = QWidget()\n self.cards_area.setWidget(a)\n a = QWidget()\n self.AIpredict_area.setWidget(a)\n a = QWidget()\n self.discard_area.setWidget(a)\n a = QWidget()\n self.history_area.setWidget(a)\n while self.Layout_Clue.count():\n item = self.Layout_Clue.takeAt(0)\n widget = item.widget()\n if widget:\n widget.setParent(None)\n widget.deleteLater()\n while self.Layout_score.count():\n item = self.Layout_score.takeAt(0)\n widget = item.widget()\n if widget:\n widget.setParent(None)\n widget.deleteLater()\n while self.Layout_toP.count():\n item = self.Layout_toP.takeAt(0)\n widget = item.widget()\n if widget:\n widget.setParent(None)\n widget.deleteLater()\n\n def join_table_click(self, table_id):\n password = self.password_edit.toPlainText()\n password = password.strip()\n table = self.tables[table_id]\n if table[\"running\"]:\n #游戏已经开始,进入观战\n print(\"Try tableSpectate\")\n self.send(\n \"tableSpectate\",\n {\n \"shadowingPlayerIndex\": -1,\n \"tableID\": table_id\n },\n )\n else:\n #正常加入\n if table[\"passwordProtected\"]:\n self.send(\n \"tableJoin\",\n {\n \"tableID\": table_id,\n \"password\": password\n },\n )\n else:\n self.send(\n \"tableJoin\",\n {\n \"tableID\": table_id,\n },\n )\n\n def update_table_info(self, tables):\n try:\n self.tables = tables\n lc = QHBoxLayout()\n for table in tables.values():\n table_id = table[\"id\"]\n numPlayers = table[\"numPlayers\"]\n running = table[\"running\"]\n variant = table[\"variant\"]\n passwordProtected = table[\"passwordProtected\"]\n players = table[\"players\"]\n name = table[\"name\"]\n\n table_str = f\"[{name}] \\n\" \\\n f\"ID:{table_id} | 玩家数: {numPlayers} | 游戏中: {running} \\n\" \\\n f\"模式: {variant} \\n\" \\\n f\"密码: {passwordProtected} \\n\" \\\n f\"[{','.join(players)}]\"\n\n cbutton = QPushButton(table_str)\n cbutton.setFixedSize(300, 140)\n cbutton.setStyleSheet(f\"text-align: center; font: 15px;\")\n cbutton.clicked.connect(lambda _, xx=table_id: self.join_table_click(xx))\n if self.in_room_status != 0:\n cbutton.setEnabled(False)\n else:\n cbutton.setEnabled(True)\n\n lc.addWidget(cbutton)\n\n lc.addStretch(1)\n a = QWidget()\n a.setLayout(lc)\n self.table_area.setWidget(a)\n except Exception as e:\n traceback.print_exc()\n\n def enable_active_btn(self, enable):\n self.discard_btn.setEnabled(enable)\n self.play_btn.setEnabled(enable)\n self.clue_btn.setEnabled(enable)\n\n def play_clicked(self):\n if self.current_pcard_pid_pos is None:\n print(\"ERROR: 没有选中的玩家牌\")\n return\n [pid, pos] = self.current_pcard_pid_pos\n 
if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": ACTION.PLAY,\n \"target\": order,\n },\n )\n\n def discard_clicked(self):\n if self.current_pcard_pid_pos is None:\n print(\"ERROR: 没有选中的玩家牌\")\n return\n [pid, pos] = self.current_pcard_pid_pos\n if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": ACTION.DISCARD,\n \"target\": order,\n },\n )\n\n def clue_clicked(self):\n try:\n if self.clue_choose is None:\n print(\"ERROR: 还没有选择提示\")\n return\n if self.splayer_choose is None:\n print(\"ERROR: 还没有选择提示的玩家\")\n return\n rpid = self.splayer_choose - self.active_pid\n if rpid < 0:\n rpid += self.player_count\n if self.clue_choose[0] == \"I\":\n clue_type = ACTION.COLOR_CLUE\n else:\n clue_type = ACTION.RANK_CLUE\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": clue_type,\n \"target\": self.splayer_choose,\n \"value\": int(self.clue_choose[1]),\n },\n )\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n def call_next_round(self, active_pid):\n try:\n lb = QVBoxLayout()\n self.active_pid = active_pid\n self.info_label.setText(f\"轮到P{active_pid}【{self.playerNames[active_pid]}】操作\")\n if len(self.AI_pids) > 0:\n if active_pid == self.AI_pids[0] and (not self.spectating):\n self.info_label.setText(f\"轮到P{active_pid}【{self.playerNames[active_pid]}】【你自己!】操作\")\n\n if active_pid in self.AI_pids:\n #向AI查询预测结果\n if not self.spectating:\n self.enable_active_btn(True)\n action_predict = self.game_controller.call_AI_predict(active_pid, 10)\n for action in action_predict:\n action_token = action[\"token\"]\n action_probs = action[\"probs\"]\n action_detail = self.game_controller.get_action(action_token, active_pid)\n action_desc = action_detail[\"str\"]\n for clue_r in self.clue_replace:\n if clue_r in action_desc:\n #print(clue_r)\n action_desc = action_desc.replace(clue_r, self.clue_replace[clue_r])\n break\n action_str = f'{action_desc} \\n 概率:{action_probs*100:.2f}%'\n actionbutton = ValueButton(action_str, action_detail)\n actionbutton.setStyleSheet(f\"font: bold 18px;\")\n actionbutton.setFixedSize(330, 50)\n actionbutton.clicked.connect(lambda _, i=copy.deepcopy(action_detail): self.ai_action_clicked(i))\n if self.spectating:\n actionbutton.setEnabled(False)\n else:\n actionbutton.setEnabled(True)\n lb.addWidget(actionbutton)\n else:\n self.enable_active_btn(False)\n\n lb.addStretch(1)\n a = QWidget()\n a.setLayout(lb)\n self.AIpredict_area.setWidget(a)\n print(\"Call_next_round Finish\")\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n def update_game_state(self):\n state_txt = f\"得分:{self.game_controller.score}/{sum(self.game_controller.Hrank)}\\n线索:{self.game_controller.clue}\" \\\n f\"\\n 错误:{self.game_controller.mistake} \\n 剩余牌:{self.game_controller.get_current_card()}\"\n self.state_label.setText(state_txt)\n\n def update_all_game_info(self):\n\n self.player_card_btns = []\n self.current_pcard_pid_pos = None\n\n # 更新所有的分数信息\n for i in range(len(self.game_controller.Irank)):\n score = self.game_controller.Irank[i]\n self.scoreLabels[i].setText(f\"{score}\")\n\n #更新历史消息\n lb = QVBoxLayout()\n for history_str in self.online_action_list:\n button = QPushButton(f\"{history_str}\", self)\n button.setFixedSize(200, 40)\n lb.addWidget(button)\n 
lb.addStretch(1)\n a = QWidget()\n a.setLayout(lb)\n\n self.history_area.setWidget(a)\n\n v_scrollbar = self.history_area.findChild(QScrollBar)\n if v_scrollbar:\n v_scrollbar.setValue(v_scrollbar.maximum())\n\n #更新弃牌堆信息\n lg = QGridLayout()\n ind = 0\n for card in self.game_controller.discard_cards:\n row = ind // 6\n column = ind % 6\n ind += 1\n card_index, card_rank = self.game_controller.parse_card(card)\n button = QPushButton(f\"{card_rank}\", self)\n button.setFixedSize(40, 40)\n button.setStyleSheet(f\"{self.index_to_color[card_index]}; font: bold 24px;\")\n lg.addWidget(button, row, column)\n #lg.addStretch(1)\n a = QWidget()\n a.setLayout(lg)\n self.discard_area.setWidget(a)\n\n # 更新UI中玩家的卡\n lb = QVBoxLayout()\n pid = 0\n for player in self.game_controller.players:\n p_head = QLabel(f\"Player: {pid} [{self.playerNames[pid]}]\")\n if pid == self.active_pid:\n p_head.setStyleSheet(f'font: bold 35px;')\n else:\n p_head.setStyleSheet(f'font-size: 30px;')\n lb.addWidget(p_head)\n cl = len(player.cards)\n lc = QHBoxLayout()\n for i in range(cl - 1, -1, -1):\n card = player.cards[i]\n kcard = player.known_cards[i]\n card_index, card_rank = self.game_controller.parse_card(card)\n kcard_index, kcard_rank = self.game_controller.parse_card(kcard)\n if card_index == 9:\n card_color = \"background-color: rgb(200, 200, 200)\"\n else:\n card_color = self.index_to_color[card_index]\n if kcard_index == 9:\n kcard_color = \"background-color: rgb(200, 200, 200)\"\n else:\n kcard_color = self.index_to_color[kcard_index]\n if card_rank == 9:\n card_rank = \"?\"\n if kcard_rank == 9:\n kcard_rank = \"?\"\n\n pcbutton = CardButton(f\"{card_rank}\", f\"{kcard_rank}\", card_color, kcard_color, self.pcard_clicked, [pid, i])\n self.player_card_btns.append(pcbutton)\n #self.current_tbutton_list.append(pcbutton)\n lc.addWidget(pcbutton)\n\n pid += 1\n lc.addStretch(0)\n lb.addLayout(lc)\n\n lb.addStretch(1)\n a = QWidget()\n a.setLayout(lb)\n self.cards_area.setWidget(a)\n\n def playerchose_clicked(self, pid):\n self.splayer_choose = pid\n for cbtn in self.splayer_btns:\n if cbtn.get_value() == pid:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"24px;\", \"36px;\"))\n else:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"36px;\", \"24px;\"))\n\n def cluechose_clicked(self, clue):\n self.clue_choose = clue\n for cbtn in self.clue_btns:\n if cbtn.get_value() == clue:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"24px;\", \"36px;\"))\n else:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"36px;\", \"24px;\"))\n\n def pcard_clicked(self, pid_pos):\n self.current_pcard_pid_pos = pid_pos\n #print(self.current_pcard_pid_pos)\n for cbtn in self.player_card_btns:\n #print(cbtn.get_value())\n if cbtn.value[0] == pid_pos[0] and cbtn.value[1] == pid_pos[1]:\n cbtn.highlight(True)\n else:\n cbtn.highlight(False)\n\n def setup_button_pannel(self, players):\n #选择玩家区域的所有玩家\n self.splayer_btns = []\n self.splayer_choose = None\n for i in range(0, players):\n button = ValueButton(f\"P{i}\", i)\n button.setFixedSize(60, 60)\n\n button.setStyleSheet(f\"background-color: rgb(220, 220, 220); font: bold 24px;\")\n self.splayer_btns.append(button)\n\n button.clicked.connect(lambda _, i=i: self.playerchose_clicked(i))\n self.Layout_toP.addWidget(button, i)\n\n #选择提示线索区域的所有线索\n self.clue_btns = []\n self.clue_choose = None\n special_dict = self.game_controller.special_dict\n colors = special_dict.last_special_card + 1\n for i in range(colors):\n #提示颜色\n if i == special_dict.last_special_card and (special_dict.all_color_rule 
or special_dict.no_color_rule):\n #彩虹无法被提示,Null也无法被提示\n continue\n clue = f\"I{i}\"\n button = ValueButton(clue, clue)\n button.setFixedSize(50, 50)\n\n button.setStyleSheet(f\"{self.index_to_color[i]}; font: bold 24px;\")\n self.clue_btns.append(button)\n\n button.clicked.connect(lambda _, clue=clue: self.cluechose_clicked(clue))\n self.Layout_Clue.addWidget(button, 0, i)\n for i in range(1, 6):\n #提示数字\n clue = f\"R{i}\"\n button = ValueButton(clue, clue)\n button.setFixedSize(50, 50)\n\n button.setStyleSheet(f\"background-color: rgb(200, 200, 200); font: bold 24px;\")\n self.clue_btns.append(button)\n\n button.clicked.connect(lambda _, clue=clue: self.cluechose_clicked(clue))\n self.Layout_Clue.addWidget(button, 1, i - 1)\n\n #得分区域的显示(对应五种颜色)\n self.scoreLabels = []\n for i in range(colors):\n #提示颜色\n clue = f\"I{i}\"\n sl = QLabel(\"0\")\n sl.setFixedSize(70, 70)\n sl.setStyleSheet(\"QLabel {\"\n f\"{self.index_to_color[i]};\"\n \"border: 2px solid black;\"\n \"border-radius: 5px;\"\n \"font: bold 40px;\"\n \"text-align: center;\"\n \"}\")\n self.scoreLabels.append(sl)\n self.Layout_score.addWidget(sl, i)"
},
{
"identifier": "load_model",
"path": "play_util.py",
"snippet": "def load_model(model_name=None):\n #device = 'cuda' if torch.cuda.is_available() else 'cpu' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.\n device = 'cpu'\n\n acition_dict_toid = {}\n if model_name is None:\n dict_path = 'dict.json'\n else:\n dict_path = f'{model_name}/dict.json'\n with open(dict_path, 'r', encoding='utf-8') as file:\n acition_dict = json.load(file)\n acition_dict = [\"<pad>\"] + acition_dict\n ind = 0\n for action in acition_dict:\n acition_dict_toid[action] = ind\n #print(action, ind)\n ind += 1\n n_vacabs = len(acition_dict)\n output_acition_dict_toid = {}\n if model_name is None:\n output_dict_path = 'output_dict.json'\n else:\n output_dict_path = f'{model_name}/output_dict.json'\n with open(output_dict_path, 'r', encoding='utf-8') as file:\n output_acition_dict = json.load(file)\n output_acition_dict = [\"<pad>\"] + output_acition_dict\n ind = 0\n for action in output_acition_dict:\n output_acition_dict_toid[action] = ind\n #print(action, ind)\n ind += 1\n n_vacabs_out = len(output_acition_dict)\n\n if model_name is None:\n max_seq_len = 900\n dim = 384\n n_layers = 8\n n_heads = 8\n multiple_of = 32\n dropout = 0.0\n model_args = dict(\n dim=dim,\n n_layers=n_layers,\n n_heads=n_heads,\n n_kv_heads=n_heads,\n vocab_size=n_vacabs,\n output_vocab_size=n_vacabs_out,\n multiple_of=multiple_of,\n max_seq_len=max_seq_len,\n dropout=dropout,\n ) # s\n else:\n with open(f'{model_name}/config.json', 'r') as json_file:\n model_args = json.load(json_file)\n\n seed = 1337\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul\n torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn\n\n # init from a model saved in a specific directory\n if model_name is None:\n ckpt_path = 'best_valid.pth'\n else:\n ckpt_path = f'{model_name}/model.pth'\n state_dict = torch.load(ckpt_path, map_location=device)\n gptconf = ModelArgs(**model_args)\n model = Transformer(gptconf)\n unwanted_prefix = '_orig_mod.'\n for k, v in list(state_dict.items()):\n if k.startswith(unwanted_prefix):\n state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n model.to(device)\n return model, acition_dict, acition_dict_toid, output_acition_dict, output_acition_dict_toid, device"
}
] | import sys
import json
import requests
from PyQt5 import QtWidgets, QtCore
from game_ui import AIWindow
from play_util import load_model | 8,830 | def printf(*args):
print(*args, flush=True)
# Imports (3rd-party)
# Imports (local application)
# Authenticate, login to the WebSocket server, and run forever.
def login_to_hanab(username, password):
if username == "":
printf('error: "HANABI_USERNAME" is blank in the ".env" file')
sys.exit(1)
if password == "":
printf('error: "HANABI_PASSWORD" is blank in the ".env" file')
sys.exit(1)
# The official site uses HTTPS.
protocol = "https"
ws_protocol = "wss"
host = "hanab.live"
path = "/login"
ws_path = "/ws"
url = protocol + "://" + host + path
ws_url = ws_protocol + "://" + host + ws_path
printf('Authenticating to "' + url + '" with a username of "' + username + '".')
resp = requests.post(
url,
{
"username": username,
"password": password,
# This is normally supposed to be the version of the JavaScript
# client, but the server will also accept "bot" as a valid version.
"version": "bot",
},
)
# Handle failed authentication and other errors.
if resp.status_code != 200:
printf("Authentication failed:")
printf(resp.text)
sys.exit(1)
# Scrape the cookie from the response.
cookie = ""
for header in resp.headers.items():
if header[0] == "Set-Cookie":
cookie = header[1]
break
if cookie == "":
printf("Failed to parse the cookie from the authentication response headers:")
printf(resp.headers)
sys.exit(1)
return ws_url, cookie
def main():
with open(f'user_config.json', 'r') as json_file:
user_args = json.load(json_file)
username = user_args["username"]
password = user_args["password"]
model_name = user_args["model"]
printf("Load Model")
model, action_dict_toact, action_dict_toid, output_action_dict_toact, output_action_dict_toid, device = load_model(model_name)
printf("Try Login")
ws_url, cookie = login_to_hanab(username, password)
printf("Launch UI")
#QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
app = QtWidgets.QApplication(sys.argv)
| def printf(*args):
print(*args, flush=True)
# Imports (3rd-party)
# Imports (local application)
# Authenticate, login to the WebSocket server, and run forever.
def login_to_hanab(username, password):
if username == "":
printf('error: "HANABI_USERNAME" is blank in the ".env" file')
sys.exit(1)
if password == "":
printf('error: "HANABI_PASSWORD" is blank in the ".env" file')
sys.exit(1)
# The official site uses HTTPS.
protocol = "https"
ws_protocol = "wss"
host = "hanab.live"
path = "/login"
ws_path = "/ws"
url = protocol + "://" + host + path
ws_url = ws_protocol + "://" + host + ws_path
printf('Authenticating to "' + url + '" with a username of "' + username + '".')
resp = requests.post(
url,
{
"username": username,
"password": password,
# This is normally supposed to be the version of the JavaScript
# client, but the server will also accept "bot" as a valid version.
"version": "bot",
},
)
# Handle failed authentication and other errors.
if resp.status_code != 200:
printf("Authentication failed:")
printf(resp.text)
sys.exit(1)
# Scrape the cookie from the response.
cookie = ""
for header in resp.headers.items():
if header[0] == "Set-Cookie":
cookie = header[1]
break
if cookie == "":
printf("Failed to parse the cookie from the authentication response headers:")
printf(resp.headers)
sys.exit(1)
return ws_url, cookie
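# Illustrative example of the user_config.json structure expected by main() below
# (the keys match the ones read there; the values are placeholders only):
# {"username": "my_bot", "password": "secret", "model": "my_model_dir"}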
def main():
    with open('user_config.json', 'r') as json_file:
user_args = json.load(json_file)
username = user_args["username"]
password = user_args["password"]
model_name = user_args["model"]
printf("Load Model")
model, action_dict_toact, action_dict_toid, output_action_dict_toact, output_action_dict_toid, device = load_model(model_name)
printf("Try Login")
ws_url, cookie = login_to_hanab(username, password)
printf("Launch UI")
#QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
app = QtWidgets.QApplication(sys.argv) | MyUiStart = AIWindow(ws_url, cookie, [model, action_dict_toact, action_dict_toid, output_action_dict_toact, output_action_dict_toid, device]) | 0 | 2023-12-17 03:57:47+00:00 | 12k |
m-abr/FCPCodebase | communication/Radio.py | [
{
"identifier": "Other_Robot",
"path": "world/commons/Other_Robot.py",
"snippet": "class Other_Robot():\n def __init__(self, unum, is_teammate) -> None:\n self.unum = unum # convenient variable to indicate uniform number (same as other robot's index + 1)\n self.is_self = False # convenient flag to indicate if this robot is self\n self.is_teammate = is_teammate # convenient variable to indicate if this robot is from our team\n self.is_visible = False # True if this robot was seen in the last message from the server (it doesn't mean we know its absolute location)\n self.body_parts_cart_rel_pos = dict() # cartesian relative position of the robot's visible body parts\n self.body_parts_sph_rel_pos = dict() # spherical relative position of the robot's visible body parts\n self.vel_filter = 0.3 # EMA filter coefficient applied to self.state_filtered_velocity\n self.vel_decay = 0.95 # velocity decay at every vision cycle (neutralized if velocity is updated)\n\n\n # State variables: these are computed when this robot is visible and when the original robot is able to self-locate\n self.state_fallen = False # true if the robot is lying down (updated when head is visible)\n self.state_last_update = 0 # World.time_local_ms when the state was last updated\n self.state_horizontal_dist = 0 # horizontal head distance if head is visible, otherwise, average horizontal distance of visible body parts (the distance is updated by vision or radio when state_abs_pos gets a new value, but also when the other player is not visible, by assuming its last position)\n self.state_abs_pos = None # 3D head position if head is visible, otherwise, 2D average position of visible body parts, or, 2D radio head position\n self.state_orientation = 0 # orientation based on pair of lower arms or feet, or average of both (WARNING: may be older than state_last_update) \n self.state_ground_area = None # (pt_2d,radius) projection of player area on ground (circle), not precise if farther than 3m (for performance), useful for obstacle avoidance when it falls\n self.state_body_parts_abs_pos = dict() # 3D absolute position of each body part\n self.state_filtered_velocity = np.zeros(3) # 3D filtered velocity (m/s) (if the head is not visible, the 2D part is updated and v.z decays)"
},
{
"identifier": "World",
"path": "world/World.py",
"snippet": "class World():\n STEPTIME = 0.02 # Fixed step time\n STEPTIME_MS = 20 # Fixed step time in milliseconds\n VISUALSTEP = 0.04 # Fixed visual step time\n VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds\n\n # play modes in our favor\n M_OUR_KICKOFF = 0\n M_OUR_KICK_IN = 1\n M_OUR_CORNER_KICK = 2\n M_OUR_GOAL_KICK = 3\n M_OUR_FREE_KICK = 4\n M_OUR_PASS = 5\n M_OUR_DIR_FREE_KICK = 6\n M_OUR_GOAL = 7\n M_OUR_OFFSIDE = 8\n\n # play modes in their favor\n M_THEIR_KICKOFF = 9\n M_THEIR_KICK_IN = 10\n M_THEIR_CORNER_KICK = 11\n M_THEIR_GOAL_KICK = 12\n M_THEIR_FREE_KICK = 13\n M_THEIR_PASS = 14\n M_THEIR_DIR_FREE_KICK = 15\n M_THEIR_GOAL = 16\n M_THEIR_OFFSIDE = 17\n\n # neutral play modes\n M_BEFORE_KICKOFF = 18\n M_GAME_OVER = 19\n M_PLAY_ON = 20\n\n # play mode groups\n MG_OUR_KICK = 0\n MG_THEIR_KICK = 1\n MG_ACTIVE_BEAM = 2\n MG_PASSIVE_BEAM = 3\n MG_OTHER = 4 # play on, game over\n\n FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0))\n FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8))\n\n def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool, \n enable_draw:bool, logger:Logger, host:str) -> None:\n\n self.team_name = team_name # Name of our team\n self.team_name_opponent : str = None # Name of opponent team\n self.apply_play_mode_correction = apply_play_mode_correction # True to adjust ball position according to play mode\n self.step = 0 # Total number of received simulation steps (always in sync with self.time_local_ms)\n self.time_server = 0.0 # Time, in seconds, as indicated by the server (this time is NOT reliable, use only for synchronization between agents)\n self.time_local_ms = 0 # Reliable simulation time in milliseconds, use this when possible (it is incremented 20ms for every TCP message)\n self.time_game = 0.0 # Game time, in seconds, as indicated by the server\n self.goals_scored = 0 # Goals score by our team\n self.goals_conceded = 0 # Goals conceded by our team\n self.team_side_is_left : bool = None # True if our team plays on the left side (this value is later changed by the world parser)\n self.play_mode = None # Play mode of the soccer game, provided by the server \n self.play_mode_group = None # Certain play modes share characteristics, so it makes sense to group them\n self.flags_corners : dict = None # corner flags, key=(x,y,z), always assume we play on the left side\n self.flags_posts : dict = None # goal posts, key=(x,y,z), always assume we play on the left side\n self.ball_rel_head_sph_pos = np.zeros(3) # Ball position relative to head (spherical coordinates) (m, deg, deg)\n self.ball_rel_head_cart_pos = np.zeros(3) # Ball position relative to head (cartesian coordinates) (m)\n self.ball_rel_torso_cart_pos = np.zeros(3) # Ball position relative to torso (cartesian coordinates) (m)\n self.ball_rel_torso_cart_pos_history = deque(maxlen=20) # Ball position relative to torso history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position)\n self.ball_abs_pos = np.zeros(3) # Ball absolute position (up to date if self.ball_is_visible and self.robot.loc_is_up_to_date) (m)\n self.ball_abs_pos_history = deque(maxlen=20) # Ball absolute position history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position)\n self.ball_abs_pos_last_update = 0 # World.time_local_ms when self.ball_abs_pos was last updated by vision or radio\n self.ball_abs_vel = np.zeros(3) # Ball velocity vector based 
on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead get_ball_abs_vel)\n self.ball_abs_speed = 0 # Ball scalar speed based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead ||get_ball_abs_vel||)\n self.ball_is_visible = False # True if the last server message contained vision information related to the ball\n self.is_ball_abs_pos_from_vision = False # True if ball_abs_pos originated from vision, False if it originated from radio\n self.ball_last_seen = 0 # World.time_local_ms when ball was last seen (note: may be different from self.ball_abs_pos_last_update)\n self.ball_cheat_abs_pos = np.zeros(3) # Absolute ball position provided by the server as cheat (m)\n self.ball_cheat_abs_vel = np.zeros(3) # Absolute velocity vector based on the last 2 values of self.ball_cheat_abs_pos (m/s)\n self.ball_2d_pred_pos = np.zeros((1,2)) # prediction of current and future 2D ball positions*\n self.ball_2d_pred_vel = np.zeros((1,2)) # prediction of current and future 2D ball velocities*\n self.ball_2d_pred_spd = np.zeros(1) # prediction of current and future 2D ball linear speeds*\n # *at intervals of 0.02 s until ball comes to a stop or gets out of bounds (according to prediction)\n self.lines = np.zeros((30,6)) # Position of visible lines, relative to head, start_pos+end_pos (spherical coordinates) (m, deg, deg, m, deg, deg)\n self.line_count = 0 # Number of visible lines\n self.vision_last_update = 0 # World.time_local_ms when last vision update was received\n self.vision_is_up_to_date = False # True if the last server message contained vision information\n self.teammates = [Other_Robot(i, True ) for i in range(1,12)] # List of teammates, ordered by unum\n self.opponents = [Other_Robot(i, False) for i in range(1,12)] # List of opponents, ordered by unum\n self.teammates[unum-1].is_self = True # This teammate is self\n self.draw = Draw(enable_draw, unum, host, 32769) # Draw object for current player\n self.team_draw = Draw(enable_draw, 0, host, 32769) # Draw object shared with teammates\n self.logger = logger\n self.robot = Robot(unum, robot_type)\n\n\n def log(self, msg:str):\n '''\n Shortcut for:\n\n self.logger.write(msg, True, self.step)\n\n Parameters\n ----------\n msg : str\n message to be written after the simulation step\n ''' \n self.logger.write(msg, True, self.step)\n\n def get_ball_rel_vel(self, history_steps:int):\n '''\n Get ball velocity, relative to torso (m/s)\n\n Parameters\n ----------\n history_steps : int\n number of history steps to consider [1,20]\n\n Examples\n --------\n get_ball_rel_vel(1) is equivalent to (current rel pos - last rel pos) / 0.04\n get_ball_rel_vel(2) is equivalent to (current rel pos - rel pos 0.08s ago) / 0.08\n get_ball_rel_vel(3) is equivalent to (current rel pos - rel pos 0.12s ago) / 0.12\n '''\n assert 1 <= history_steps <= 20, \"Argument 'history_steps' must be in range [1,20]\"\n\n if len(self.ball_rel_torso_cart_pos_history) == 0:\n return np.zeros(3)\n\n h_step = min(history_steps, len(self.ball_rel_torso_cart_pos_history))\n t = h_step * World.VISUALSTEP\n\n return (self.ball_rel_torso_cart_pos - self.ball_rel_torso_cart_pos_history[h_step-1]) / t\n\n def get_ball_abs_vel(self, history_steps:int):\n '''\n Get ball absolute velocity (m/s)\n\n Parameters\n ----------\n history_steps : int\n number of history steps to consider [1,20]\n\n Examples\n --------\n get_ball_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04\n 
get_ball_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08\n get_ball_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12\n '''\n assert 1 <= history_steps <= 20, \"Argument 'history_steps' must be in range [1,20]\"\n\n if len(self.ball_abs_pos_history) == 0:\n return np.zeros(3)\n\n h_step = min(history_steps, len(self.ball_abs_pos_history))\n t = h_step * World.VISUALSTEP\n\n return (self.ball_abs_pos - self.ball_abs_pos_history[h_step-1]) / t\n\n def get_predicted_ball_pos(self, max_speed):\n '''\n Get predicted 2D ball position when its predicted speed is equal to or less than `max_speed`\n In case that position exceeds the prediction horizon, the last available prediction is returned\n\n Parameters\n ----------\n max_speed : float\n maximum speed at which the ball will be moving at returned future position\n '''\n b_sp = self.ball_2d_pred_spd\n index = len(b_sp) - max( 1, np.searchsorted(b_sp[::-1], max_speed, side='right') )\n return self.ball_2d_pred_pos[index]\n \n def get_intersection_point_with_ball(self, player_speed):\n '''\n Get 2D intersection point with moving ball, based on `self.ball_2d_pred_pos`\n\n Parameters\n ----------\n player_speed : float\n average speed at which the robot will chase the ball\n\n Returns\n -------\n 2D intersection point : ndarray\n 2D intersection point with moving ball, assuming the robot moves at an avg. speed of `player_speed`\n intersection distance : float\n distance between current robot position and intersection point\n '''\n \n params = np.array([*self.robot.loc_head_position[:2], player_speed*0.02, *self.ball_2d_pred_pos.flat], np.float32)\n pred_ret = ball_predictor.get_intersection(params)\n return pred_ret[:2], pred_ret[2]\n \n def update(self):\n r = self.robot\n PM = self.play_mode\n W = World\n\n # reset variables\n r.loc_is_up_to_date = False \n r.loc_head_z_is_up_to_date = False\n\n # update play mode groups\n if PM in (W.M_PLAY_ON, W.M_GAME_OVER): # most common group\n self.play_mode_group = W.MG_OTHER\n elif PM in (W.M_OUR_KICKOFF, W.M_OUR_KICK_IN, W.M_OUR_CORNER_KICK, W.M_OUR_GOAL_KICK,\n W.M_OUR_OFFSIDE, W.M_OUR_PASS, W.M_OUR_DIR_FREE_KICK, W.M_OUR_FREE_KICK):\n self.play_mode_group = W.MG_OUR_KICK\n elif PM in (W.M_THEIR_KICK_IN, W.M_THEIR_CORNER_KICK, W.M_THEIR_GOAL_KICK, W.M_THEIR_OFFSIDE,\n W.M_THEIR_PASS, W.M_THEIR_DIR_FREE_KICK, W.M_THEIR_FREE_KICK, W.M_THEIR_KICKOFF):\n self.play_mode_group = W.MG_THEIR_KICK\n elif PM in (W.M_BEFORE_KICKOFF, W.M_THEIR_GOAL):\n self.play_mode_group = W.MG_ACTIVE_BEAM\n elif PM in (W.M_OUR_GOAL,):\n self.play_mode_group = W.MG_PASSIVE_BEAM\n elif PM is not None:\n raise ValueError(f'Unexpected play mode ID: {PM}')\n\n r.update_pose() # update forward kinematics\n\n if self.ball_is_visible:\n # Compute ball position, relative to torso\n self.ball_rel_torso_cart_pos = r.head_to_body_part_transform(\"torso\",self.ball_rel_head_cart_pos)\n\n if self.vision_is_up_to_date: # update vision based localization \n\n # Prepare all variables for localization\n\n feet_contact = np.zeros(6)\n\n lf_contact = r.frp.get('lf', None)\n rf_contact = r.frp.get('rf', None)\n if lf_contact is not None:\n feet_contact[0:3] = Matrix_4x4( r.body_parts[\"lfoot\"].transform ).translate( lf_contact[0:3] , True).get_translation()\n if rf_contact is not None:\n feet_contact[3:6] = Matrix_4x4( r.body_parts[\"rfoot\"].transform ).translate( rf_contact[0:3] , True).get_translation()\n\n ball_pos = np.concatenate(( self.ball_rel_head_cart_pos, self.ball_cheat_abs_pos))\n \n 
corners_list = [[key in self.flags_corners, 1.0, *key, *self.flags_corners.get(key,(0,0,0))] for key in World.FLAGS_CORNERS_POS]\n posts_list = [[key in self.flags_posts , 0.0, *key, *self.flags_posts.get( key,(0,0,0))] for key in World.FLAGS_POSTS_POS]\n all_landmarks = np.array(corners_list + posts_list, float)\n\n # Compute localization\n\n loc = localization.compute(\n r.feet_toes_are_touching['lf'],\n r.feet_toes_are_touching['rf'],\n feet_contact,\n self.ball_is_visible,\n ball_pos,\n r.cheat_abs_pos,\n all_landmarks,\n self.lines[0:self.line_count]) \n\n r.update_localization(loc, self.time_local_ms)\n\n # Update self in teammates list (only the most useful parameters, add as needed)\n me = self.teammates[r.unum-1]\n me.state_last_update = r.loc_last_update\n me.state_abs_pos = r.loc_head_position\n me.state_fallen = r.loc_head_z < 0.3 # uses same criterion as for other teammates - not as reliable as player.behavior.is_ready(\"Get_Up\")\n me.state_orientation = r.loc_torso_orientation\n me.state_ground_area = (r.loc_head_position[:2],0.2) # relevant for localization demo\n\n # Save last ball position to history at every vision cycle (even if not up to date) \n self.ball_abs_pos_history.appendleft(self.ball_abs_pos) # from vision or radio\n self.ball_rel_torso_cart_pos_history.appendleft(self.ball_rel_torso_cart_pos)\n\n '''\n Get ball position based on vision or play mode\n Sources:\n Corner kick position - rcssserver3d/plugin/soccer/soccerruleaspect/soccerruleaspect.cpp:1927 (May 2022)\n Goal kick position - rcssserver3d/plugin/soccer/soccerruleaspect/soccerruleaspect.cpp:1900 (May 2022)\n '''\n ball = None\n if self.apply_play_mode_correction:\n if PM == W.M_OUR_CORNER_KICK:\n ball = np.array([15, 5.483 if self.ball_abs_pos[1] > 0 else -5.483, 0.042], float)\n elif PM == W.M_THEIR_CORNER_KICK:\n ball = np.array([-15, 5.483 if self.ball_abs_pos[1] > 0 else -5.483, 0.042], float)\n elif PM in [W.M_OUR_KICKOFF, W.M_THEIR_KICKOFF, W.M_OUR_GOAL, W.M_THEIR_GOAL]:\n ball = np.array([0, 0, 0.042], float)\n elif PM == W.M_OUR_GOAL_KICK:\n ball = np.array([-14, 0, 0.042], float)\n elif PM == W.M_THEIR_GOAL_KICK:\n ball = np.array([14, 0, 0.042], float)\n\n # Discard hard-coded ball position if robot is near that position (in favor of its own vision)\n if ball is not None and np.linalg.norm(r.loc_head_position[:2] - ball[:2]) < 1:\n ball = None\n\n if ball is None and self.ball_is_visible and r.loc_is_up_to_date:\n ball = r.loc_head_to_field_transform( self.ball_rel_head_cart_pos )\n ball[2] = max(ball[2], 0.042) # lowest z = ball radius\n if PM != W.M_BEFORE_KICKOFF: # for compatibility with tests without active soccer rules\n ball[:2] = np.clip(ball[:2], [-15,-10], [15,10]) # force ball position to be inside field\n\n # Update internal ball position (also updated by Radio)\n if ball is not None:\n time_diff = (self.time_local_ms - self.ball_abs_pos_last_update) / 1000\n self.ball_abs_vel = (ball - self.ball_abs_pos) / time_diff\n self.ball_abs_speed = np.linalg.norm(self.ball_abs_vel)\n self.ball_abs_pos_last_update = self.time_local_ms\n self.ball_abs_pos = ball\n self.is_ball_abs_pos_from_vision = True\n\n # Velocity decay for teammates and opponents (it is later neutralized if the velocity is updated)\n for p in self.teammates:\n p.state_filtered_velocity *= p.vel_decay\n for p in self.opponents:\n p.state_filtered_velocity *= p.vel_decay\n\n # Update teammates and opponents\n if r.loc_is_up_to_date:\n for p in self.teammates:\n if not p.is_self: # if teammate is not self\n if 
p.is_visible: # if teammate is visible, execute full update\n self.update_other_robot(p)\n elif p.state_abs_pos is not None: # otherwise update its horizontal distance (assuming last known position)\n p.state_horizontal_dist = np.linalg.norm(r.loc_head_position[:2] - p.state_abs_pos[:2])\n\n for p in self.opponents:\n if p.is_visible: # if opponent is visible, execute full update\n self.update_other_robot(p)\n elif p.state_abs_pos is not None: # otherwise update its horizontal distance (assuming last known position)\n p.state_horizontal_dist = np.linalg.norm(r.loc_head_position[:2] - p.state_abs_pos[:2])\n\n # Update prediction of ball position/velocity\n if self.play_mode_group != W.MG_OTHER: # not 'play on' nor 'game over', so ball must be stationary\n self.ball_2d_pred_pos = self.ball_abs_pos[:2].copy().reshape(1, 2)\n self.ball_2d_pred_vel = np.zeros((1,2))\n self.ball_2d_pred_spd = np.zeros(1)\n\n elif self.ball_abs_pos_last_update == self.time_local_ms: # make new prediction for new ball position (from vision or radio)\n\n params = np.array([*self.ball_abs_pos[:2], *np.copy(self.get_ball_abs_vel(6)[:2])], np.float32)\n pred_ret = ball_predictor.predict_rolling_ball(params)\n sample_no = len(pred_ret) // 5 * 2\n self.ball_2d_pred_pos = pred_ret[:sample_no].reshape(-1, 2)\n self.ball_2d_pred_vel = pred_ret[sample_no:sample_no*2].reshape(-1, 2)\n self.ball_2d_pred_spd = pred_ret[sample_no*2:]\n\n elif len(self.ball_2d_pred_pos) > 1: # otherwise, advance to next predicted step, if available \n self.ball_2d_pred_pos = self.ball_2d_pred_pos[1:]\n self.ball_2d_pred_vel = self.ball_2d_pred_vel[1:]\n self.ball_2d_pred_spd = self.ball_2d_pred_spd[1:]\n\n r.update_imu(self.time_local_ms) # update imu (must be executed after localization)\n\n\n def update_other_robot(self,other_robot : Other_Robot):\n ''' \n Update other robot state based on the relative position of visible body parts\n (also updated by Radio, with the exception of state_orientation)\n '''\n o = other_robot\n r = self.robot\n\n # update body parts absolute positions\n o.state_body_parts_abs_pos = o.body_parts_cart_rel_pos.copy()\n for bp, pos in o.body_parts_cart_rel_pos.items():\n # Using the IMU could be beneficial if we see other robots but can't self-locate\n o.state_body_parts_abs_pos[bp] = r.loc_head_to_field_transform( pos, False )\n\n # auxiliary variables \n bps_apos = o.state_body_parts_abs_pos # read-only shortcut\n bps_2d_apos_list = [v[:2] for v in bps_apos.values()] # list of body parts' 2D absolute positions\n avg_2d_pt = np.average(bps_2d_apos_list, axis=0) # 2D avg pos of visible body parts\n head_is_visible = 'head' in bps_apos\n\n # evaluate robot's state (unchanged if head is not visible)\n if head_is_visible:\n o.state_fallen = bps_apos['head'][2] < 0.3\n\n # compute velocity if head is visible\n if o.state_abs_pos is not None:\n time_diff = (self.time_local_ms - o.state_last_update) / 1000\n if head_is_visible:\n # if last position is 2D, we assume that the z coordinate did not change, so that v.z=0\n old_p = o.state_abs_pos if len(o.state_abs_pos)==3 else np.append(o.state_abs_pos, bps_apos['head'][2]) \n velocity = (bps_apos['head'] - old_p) / time_diff\n decay = o.vel_decay # neutralize decay in all axes\n else: # if head is not visible, we only update the x & y components of the velocity\n velocity = np.append( (avg_2d_pt - o.state_abs_pos[:2]) / time_diff, 0)\n decay = (o.vel_decay,o.vel_decay,1) # neutralize decay (except in the z-axis)\n # apply filter\n if np.linalg.norm(velocity - 
o.state_filtered_velocity) < 4: # otherwise assume it was beamed\n o.state_filtered_velocity /= decay # neutralize decay\n o.state_filtered_velocity += o.vel_filter * (velocity-o.state_filtered_velocity)\n\n # compute robot's position (preferably based on head) \n if head_is_visible: \n o.state_abs_pos = bps_apos['head'] # 3D head position, if head is visible\n else: \n o.state_abs_pos = avg_2d_pt # 2D avg pos of visible body parts\n\n # compute robot's horizontal distance (head distance, or avg. distance of visible body parts)\n o.state_horizontal_dist = np.linalg.norm(r.loc_head_position[:2] - o.state_abs_pos[:2])\n \n # compute orientation based on pair of lower arms or feet, or average of both\n lr_vec = None\n if 'llowerarm' in bps_apos and 'rlowerarm' in bps_apos:\n lr_vec = bps_apos['rlowerarm'] - bps_apos['llowerarm']\n \n if 'lfoot' in bps_apos and 'rfoot' in bps_apos:\n if lr_vec is None:\n lr_vec = bps_apos['rfoot'] - bps_apos['lfoot']\n else:\n lr_vec = (lr_vec + (bps_apos['rfoot'] - bps_apos['lfoot'])) / 2\n \n if lr_vec is not None:\n o.state_orientation = atan2(lr_vec[1],lr_vec[0]) * 180 / pi + 90\n\n # compute projection of player area on ground (circle) \n if o.state_horizontal_dist < 4: # we don't need precision if the robot is farther than 4m \n max_dist = np.max(np.linalg.norm(bps_2d_apos_list - avg_2d_pt, axis=1))\n else:\n max_dist = 0.2\n o.state_ground_area = (avg_2d_pt,max_dist)\n\n # update timestamp\n o.state_last_update = self.time_local_ms"
}
] | from typing import List
from world.commons.Other_Robot import Other_Robot
from world.World import World
import numpy as np | 8,222 |
class Radio():
'''
map limits are hardcoded:
teammates/opponents positions (x,y) in ([-16,16],[-11,11])
ball position (x,y) in ([-15,15],[-10,10])
known server limitations:
claimed: all ascii from 0x20 to 0x7E except ' ', '(', ')'
bugs:
- ' or " clip the message
- '\' at the end or near another '\'
- ';' at beginning of message
'''
# map limits are hardcoded:
# lines, columns, half lines index, half cols index, (lines-1)/x_span, (cols-1)/y_span, combinations, combinations*2states,
TP = 321,221,160,110,10, 10,70941,141882 # teammate position
OP = 201,111,100,55, 6.25,5, 22311,44622 # opponent position
BP = 301,201,150,100,10, 10,60501 # ball position
SYMB = "!#$%&*+,-./0123456789:<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{|}~;"
SLEN = len(SYMB)
SYMB_TO_IDX = {ord(s):i for i,s in enumerate(SYMB)}
def __init__(self, world : World, commit_announcement) -> None:
self.world = world
self.commit_announcement = commit_announcement
r = world.robot
t = world.teammates
o = world.opponents
self.groups = ( # player team/unum, group has ball?, self in group?
[(t[9],t[10],o[6],o[7],o[8],o[9],o[10]), True ], # 2 teammates, 5 opponents, ball
[(t[0],t[1], t[2],t[3],t[4],t[5],t[6] ), False], # 7 teammates
[(t[7],t[8], o[0],o[1],o[2],o[3],o[4],o[5]), False] # 2 teammates, 6 opponents
)
for g in self.groups: # add 'self in group?'
g.append(any(i.is_self for i in g[0]))
def get_player_combination(self, pos, is_unknown, is_down, info):
''' Returns combination (0-based) and number of possible combinations '''
if is_unknown:
return info[7]+1, info[7]+2 # return unknown combination
x,y = pos[:2]
if x < -17 or x > 17 or y < -12 or y > 12:
            return info[7], info[7]+2 # report as out of bounds if the position exceeds the map limits by more than 1m in any axis
# convert to int to avoid overflow later
l = int(np.clip( round(info[4]*x+info[2]), 0, info[0]-1 )) # absorb out of bounds positions (up to 1m in each axis)
c = int(np.clip( round(info[5]*y+info[3]), 0, info[1]-1 ))
return (l*info[1]+c)+(info[6] if is_down else 0), info[7]+2 # return valid combination
def get_ball_combination(self, x, y):
''' Returns combination (0-based) and number of possible combinations '''
# if ball is out of bounds, we force it in
l = int(np.clip( round(Radio.BP[4]*x+Radio.BP[2]), 0, Radio.BP[0]-1 ))
c = int(np.clip( round(Radio.BP[5]*y+Radio.BP[3]), 0, Radio.BP[1]-1 ))
return l*Radio.BP[1]+c, Radio.BP[6] # return valid combination
def get_ball_position(self,comb):
l = comb // Radio.BP[1]
c = comb % Radio.BP[1]
return np.array([l/Radio.BP[4]-15, c/Radio.BP[5]-10, 0.042]) # assume ball is on ground
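    # Worked example: get_ball_combination(0.0, 0.0) gives l=150 and c=100, i.e.
    # combination 150*201+100 = 30250 out of 60501 possibilities, and
    # get_ball_position(30250) decodes it back to [0.0, 0.0, 0.042].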
def get_player_position(self,comb, info):
if comb == info[7]: return -1 # player is out of bounds
if comb == info[7]+1: return -2 # player is in unknown location
is_down = comb >= info[6]
if is_down:
comb -= info[6]
l = comb // info[1]
c = comb % info[1]
return l/info[4]-16, c/info[5]-11, is_down
def check_broadcast_requirements(self):
'''
Check if broadcast group is valid
Returns
-------
ready : bool
True if all requirements are met
Sequence: g0,g1,g2, ig0,ig1,ig2, iig0,iig1,iig2 (whole cycle: 0.36s)
igx means 'incomplete group', where <=1 element can be MIA recently
iigx means 'very incomplete group', where <=2 elements can be MIA recently
Rationale: prevent incomplete messages from monopolizing the broadcast space
However:
- 1st round: when 0 group members are missing, that group will update 3 times every 0.36s
- 2nd round: when 1 group member is recently missing, that group will update 2 times every 0.36s
- 3rd round: when 2 group members are recently missing, that group will update 1 time every 0.36s
- when >2 group members are recently missing, that group will not be updated
Players that have never been seen or heard are not considered for the 'recently missing'.
If there is only 1 group member since the beginning, the respective group can be updated, except in the 1st round.
In this way, the 1st round cannot be monopolized by clueless agents, which is important during games with 22 players.
'''
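        # Example: the 9-round sequence above spans 0.36s, i.e. one round per 0.04s,
        # so a group with one recently missing member can broadcast at most twice per
        # cycle (its ig and iig rounds) and with two missing members at most once.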
w = self.world
r = w.robot
ago40ms = w.time_local_ms - 40
ago370ms = w.time_local_ms - 370 # maximum delay (up to 2 MIAs) is 360ms because radio has a delay of 20ms (otherwise max delay would be 340ms)
|
class Radio():
'''
map limits are hardcoded:
teammates/opponents positions (x,y) in ([-16,16],[-11,11])
ball position (x,y) in ([-15,15],[-10,10])
known server limitations:
claimed: all ascii from 0x20 to 0x7E except ' ', '(', ')'
bugs:
- ' or " clip the message
- '\' at the end or near another '\'
- ';' at beginning of message
'''
# map limits are hardcoded:
# lines, columns, half lines index, half cols index, (lines-1)/x_span, (cols-1)/y_span, combinations, combinations*2states,
TP = 321,221,160,110,10, 10,70941,141882 # teammate position
OP = 201,111,100,55, 6.25,5, 22311,44622 # opponent position
BP = 301,201,150,100,10, 10,60501 # ball position
SYMB = "!#$%&*+,-./0123456789:<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{|}~;"
SLEN = len(SYMB)
SYMB_TO_IDX = {ord(s):i for i,s in enumerate(SYMB)}
def __init__(self, world : World, commit_announcement) -> None:
self.world = world
self.commit_announcement = commit_announcement
r = world.robot
t = world.teammates
o = world.opponents
self.groups = ( # player team/unum, group has ball?, self in group?
[(t[9],t[10],o[6],o[7],o[8],o[9],o[10]), True ], # 2 teammates, 5 opponents, ball
[(t[0],t[1], t[2],t[3],t[4],t[5],t[6] ), False], # 7 teammates
[(t[7],t[8], o[0],o[1],o[2],o[3],o[4],o[5]), False] # 2 teammates, 6 opponents
)
for g in self.groups: # add 'self in group?'
g.append(any(i.is_self for i in g[0]))
def get_player_combination(self, pos, is_unknown, is_down, info):
''' Returns combination (0-based) and number of possible combinations '''
if is_unknown:
return info[7]+1, info[7]+2 # return unknown combination
x,y = pos[:2]
if x < -17 or x > 17 or y < -12 or y > 12:
            return info[7], info[7]+2 # report as out of bounds if the position exceeds the map limits by more than 1m in any axis
# convert to int to avoid overflow later
l = int(np.clip( round(info[4]*x+info[2]), 0, info[0]-1 )) # absorb out of bounds positions (up to 1m in each axis)
c = int(np.clip( round(info[5]*y+info[3]), 0, info[1]-1 ))
return (l*info[1]+c)+(info[6] if is_down else 0), info[7]+2 # return valid combination
def get_ball_combination(self, x, y):
''' Returns combination (0-based) and number of possible combinations '''
# if ball is out of bounds, we force it in
l = int(np.clip( round(Radio.BP[4]*x+Radio.BP[2]), 0, Radio.BP[0]-1 ))
c = int(np.clip( round(Radio.BP[5]*y+Radio.BP[3]), 0, Radio.BP[1]-1 ))
return l*Radio.BP[1]+c, Radio.BP[6] # return valid combination
def get_ball_position(self,comb):
l = comb // Radio.BP[1]
c = comb % Radio.BP[1]
return np.array([l/Radio.BP[4]-15, c/Radio.BP[5]-10, 0.042]) # assume ball is on ground
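    # Worked example: get_ball_combination(0.0, 0.0) gives l=150 and c=100, i.e.
    # combination 150*201+100 = 30250 out of 60501 possibilities, and
    # get_ball_position(30250) decodes it back to [0.0, 0.0, 0.042].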
def get_player_position(self,comb, info):
if comb == info[7]: return -1 # player is out of bounds
if comb == info[7]+1: return -2 # player is in unknown location
is_down = comb >= info[6]
if is_down:
comb -= info[6]
l = comb // info[1]
c = comb % info[1]
return l/info[4]-16, c/info[5]-11, is_down
def check_broadcast_requirements(self):
'''
Check if broadcast group is valid
Returns
-------
ready : bool
True if all requirements are met
Sequence: g0,g1,g2, ig0,ig1,ig2, iig0,iig1,iig2 (whole cycle: 0.36s)
igx means 'incomplete group', where <=1 element can be MIA recently
iigx means 'very incomplete group', where <=2 elements can be MIA recently
Rationale: prevent incomplete messages from monopolizing the broadcast space
However:
- 1st round: when 0 group members are missing, that group will update 3 times every 0.36s
- 2nd round: when 1 group member is recently missing, that group will update 2 times every 0.36s
- 3rd round: when 2 group members are recently missing, that group will update 1 time every 0.36s
- when >2 group members are recently missing, that group will not be updated
Players that have never been seen or heard are not considered for the 'recently missing'.
If there is only 1 group member since the beginning, the respective group can be updated, except in the 1st round.
In this way, the 1st round cannot be monopolized by clueless agents, which is important during games with 22 players.
'''
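        # Example: the 9-round sequence above spans 0.36s, i.e. one round per 0.04s,
        # so a group with one recently missing member can broadcast at most twice per
        # cycle (its ig and iig rounds) and with two missing members at most once.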
w = self.world
r = w.robot
ago40ms = w.time_local_ms - 40
ago370ms = w.time_local_ms - 370 # maximum delay (up to 2 MIAs) is 360ms because radio has a delay of 20ms (otherwise max delay would be 340ms) | group : List[Other_Robot] | 0 | 2023-12-16 23:40:23+00:00 | 12k |
quocanh34/magic-animate-modified | magicanimate/models/unet_controlnet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, 
encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n 
encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "magicanimate/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from magicanimate.models.unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint | 7,794 | def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn": | self.mid_block = UNetMidBlock3DCrossAttn( | 3 | 2023-12-15 01:22:37+00:00 | 12k |
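For reference, the InflatedConv3d pattern quoted in this row's context (fold the video frames into the batch axis, run an ordinary Conv2d, then unfold back to a 5-D tensor) can be reproduced as a small standalone sketch. This is only an illustration of the trick the completion target builds on, not part of the dataset row; it assumes torch and einops are available, and the shape check at the bottom is an added usage example.

# Minimal sketch of the "inflated" 2-D convolution used in the row above:
# frames are folded into the batch dimension so a standard Conv2d can
# process them, then unfolded back into a (B, C, F, H, W) tensor.
import torch
import torch.nn as nn
from einops import rearrange


class InflatedConv3d(nn.Conv2d):
    def forward(self, x):
        # x: (batch, channels, frames, height, width)
        video_length = x.shape[2]
        x = rearrange(x, "b c f h w -> (b f) c h w")   # fold frames into batch
        x = super().forward(x)                          # plain 2-D convolution
        x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
        return x


if __name__ == "__main__":
    conv = InflatedConv3d(4, 320, kernel_size=3, padding=1)
    sample = torch.randn(1, 4, 8, 64, 64)   # B=1, C=4, F=8 frames, 64x64
    print(conv(sample).shape)               # torch.Size([1, 320, 8, 64, 64])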
cvlab-yonsei/RankMixup | calibrate/evaluation/calibrate_evaluator.py | [
{
"identifier": "DatasetEvaluator",
"path": "calibrate/evaluation/evaluator.py",
"snippet": "class DatasetEvaluator(metaclass=ABCMeta):\n \"\"\"\n Base class for a dataset evaluator\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self):\n \"\"\"\n Update status given a mini-batch results\n \"\"\"\n pass\n\n def curr_score(self):\n \"\"\"\n Return curr score after last batch\n \"\"\"\n pass\n\n @abstractmethod\n def mean_score(self):\n \"\"\"\n Return mean score across all classes/samples\n \"\"\"\n pass\n\n def class_score(self):\n \"\"\"\n Return score for different classes\n \"\"\"\n pass\n\n @abstractmethod\n def num_samples(self):\n \"\"\"\n return the evaluated samples\n \"\"\"\n pass\n\n @abstractmethod\n def main_metric(self):\n \"return the name of the main metric\"\n pass"
},
{
"identifier": "ECELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class ECELoss(nn.Module):\n '''\n Compute ECE (Expected Calibration Error)\n '''\n def __init__(self, n_bins=15):\n super(ECELoss, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n return ece"
},
{
"identifier": "AdaptiveECELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class AdaptiveECELoss(nn.Module):\n '''\n Compute Adaptive ECE\n '''\n def __init__(self, n_bins=15):\n super(AdaptiveECELoss, self).__init__()\n self.nbins = n_bins\n\n def histedges_equalN(self, x):\n npt = len(x)\n return np.interp(np.linspace(0, npt, self.nbins + 1),\n np.arange(npt),\n np.sort(x))\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n n, bin_boundaries = np.histogram(confidences.cpu().detach(), self.histedges_equalN(confidences.cpu().detach()))\n #print(n,confidences,bin_boundaries)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n return ece"
},
{
"identifier": "ClasswiseECELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class ClasswiseECELoss(nn.Module):\n '''\n Compute Classwise ECE\n '''\n def __init__(self, n_bins=15):\n super(ClasswiseECELoss, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n num_classes = int((torch.max(labels) + 1).item())\n softmaxes = F.softmax(logits, dim=1)\n per_class_sce = None\n\n for i in range(num_classes):\n class_confidences = softmaxes[:, i]\n class_sce = torch.zeros(1, device=logits.device)\n labels_in_class = labels.eq(i) # one-hot vector of all positions where the label belongs to the class i\n\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n in_bin = class_confidences.gt(bin_lower.item()) * class_confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = labels_in_class[in_bin].float().mean()\n avg_confidence_in_bin = class_confidences[in_bin].mean()\n class_sce += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n if (i == 0):\n per_class_sce = class_sce\n else:\n per_class_sce = torch.cat((per_class_sce, class_sce), dim=0)\n\n sce = torch.mean(per_class_sce)\n return sce"
},
{
"identifier": "OELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class OELoss(nn.Module):\n '''\n Compute OE (Overconfidence Error)\n '''\n def __init__(self, n_bins=15):\n super(OELoss, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n\n oe = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n oe += avg_confidence_in_bin * F.relu(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n return oe"
},
{
"identifier": "UELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class UELoss(nn.Module):\n '''\n Compute UE (Underconfidence Error)\n '''\n def __init__(self, n_bins=15):\n super(UELoss, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n\n ue = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ue += avg_confidence_in_bin * F.relu(accuracy_in_bin - avg_confidence_in_bin) * prop_in_bin\n\n return ue"
},
{
"identifier": "ReliabilityDiagram",
"path": "calibrate/evaluation/reliability_diagram.py",
"snippet": "class ReliabilityDiagram(object):\n \"\"\"\n Plot Confidence Histogram and Reliability Diagram to visualize miscalibration.\n On classification, plot the gaps between average confidence and observed accuracy bin-wise over the confidence\n space [1]_, [2]_.\n On detection, plot the miscalibration w.r.t. the additional regression information provided (1-D or 2-D) [3]_.\n\n Parameters\n ----------\n bins : int or iterable, default: 10\n Number of bins used by the ACE/ECE/MCE.\n On detection mode: if int, use same amount of bins for each dimension (nx1 = nx2 = ... = bins).\n If iterable, use different amount of bins for each dimension (nx1, nx2, ... = bins).\n equal_intervals : bool, optional, default: True\n If True, the bins have the same width. If False, the bins are splitted to equalize\n the number of samples in each bin.\n detection : bool, default: False\n If False, the input array 'X' is treated as multi-class confidence input (softmax)\n with shape (n_samples, [n_classes]).\n If True, the input array 'X' is treated as a box predictions with several box features (at least\n box confidence must be present) with shape (n_samples, [n_box_features]).\n fmin : float, optional, default: None\n Minimum value for scale color.\n fmax : float, optional, default: None\n Maximum value for scale color.\n metric : str, default: 'ECE'\n Metric to measure miscalibration. Might be either 'ECE', 'ACE' or 'MCE'.\n\n References\n ----------\n .. [1] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:\n \"On Calibration of Modern Neural Networks.\"\n Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.\n `Get source online <https://arxiv.org/abs/1706.04599>`_\n\n .. [2] A. Niculescu-Mizil and R. Caruana:\n “Predicting good probabilities with supervised learning.”\n Proceedings of the 22nd International Conference on Machine Learning, 2005, pp. 625–632.\n `Get source online <https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf>`_\n\n .. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:\n \"Multivariate Confidence Calibration for Object Detection.\"\n The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020.\n `Get source online <https://openaccess.thecvf.com/content_CVPRW_2020/papers/w20/Kuppers_Multivariate_Confidence_Calibration_for_Object_Detection_CVPRW_2020_paper.pdf>`_\n \"\"\"\n\n def __init__(self, bins: Union[int, Iterable[int]] = 10, equal_intervals: bool = True,\n detection: bool = False, sample_threshold: int = 1,\n fmin: float = None, fmax: float = None,\n metric: str = 'ECE', style: str = \"curve\", **kwargs):\n \"\"\" Constructor. For detailed parameter documentation view classdocs. 
\"\"\"\n\n assert style in [\"curve\", \"bar\"]\n self.bins = bins\n self.detection = detection\n self.sample_threshold = sample_threshold\n self.fmin = fmin\n self.fmax = fmax\n self.metric = metric\n self.style = style\n\n if 'feature_names' in kwargs:\n self.feature_names = kwargs['feature_names']\n\n if 'title_suffix' in kwargs:\n self.title_suffix = kwargs['title_suffix']\n\n self._miscalibration = _Miscalibration(bins=bins, equal_intervals=equal_intervals,\n detection=detection, sample_threshold=sample_threshold)\n\n def plot(self, X: Union[Iterable[np.ndarray], np.ndarray], y: Union[Iterable[np.ndarray], np.ndarray],\n batched: bool = False, uncertainty: str = None, filename: str = None, tikz: bool = False,\n title_suffix: str = None, feature_names: List[str] = None, **save_args) -> Union[plt.Figure, str]:\n \"\"\"\n Reliability diagram to visualize miscalibration. This could be either in classical way for confidences only\n or w.r.t. additional properties (like x/y-coordinates of detection boxes, width, height, etc.). The additional\n properties get binned. Afterwards, the miscalibration will be calculated for each bin. This is\n visualized as a 2-D plots.\n\n Parameters\n ----------\n X : iterable of np.ndarray, or np.ndarray of shape=([n_bayes], n_samples, [n_classes/n_box_features])\n NumPy array with confidence values for each prediction on classification with shapes\n 1-D for binary classification, 2-D for multi class (softmax).\n If 3-D, interpret first dimension as samples from an Bayesian estimator with mulitple data points\n for a single sample (e.g. variational inference or MC dropout samples).\n If this is an iterable over multiple instances of np.ndarray and parameter batched=True,\n interpret this parameter as multiple predictions that should be averaged.\n On detection, this array must have 2 dimensions with number of additional box features in last dim.\n y : iterable of np.ndarray with same length as X or np.ndarray of shape=([n_bayes], n_samples, [n_classes])\n NumPy array with ground truth labels.\n Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).\n If 3-D, interpret first dimension as samples from an Bayesian estimator with mulitple data points\n for a single sample (e.g. variational inference or MC dropout samples).\n If iterable over multiple instances of np.ndarray and parameter batched=True,\n interpret this parameter as multiple predictions that should be averaged.\n batched : bool, optional, default: False\n Multiple predictions can be evaluated at once (e.g. cross-validation examinations) using batched-mode.\n All predictions given by X and y are separately evaluated and their results are averaged afterwards\n for visualization.\n uncertainty : str, optional, default: False\n Define uncertainty handling if input X has been sampled e.g. by Monte-Carlo dropout or similar methods\n that output an ensemble of predictions per sample. 
Choose one of the following options:\n - flatten: treat everything as a separate prediction - this option will yield into a slightly better\n calibration performance but without the visualization of a prediction interval.\n - mean: compute Monte-Carlo integration to obtain a simple confidence estimate for a sample\n (mean) with a standard deviation that is visualized.\n filename : str, optional, default: None\n Optional filename to save the plotted figure.\n tikz : bool, optional, default: False\n If True, use 'tikzplotlib' package to return tikz-code for Latex rather than a Matplotlib figure.\n title_suffix : str, optional, default: None\n Suffix for plot title.\n feature_names : list, optional, default: None\n Names of the additional features that are attached to the axes of a reliability diagram.\n **save_args : args\n Additional arguments passed to 'matplotlib.pyplot.Figure.savefig' function if 'tikz' is False.\n If 'tikz' is True, the argument are passed to 'tikzplotlib.get_tikz_code' function.\n\n Returns\n -------\n matplotlib.pyplot.Figure if 'tikz' is False else str with tikz code.\n\n Raises\n ------\n AttributeError\n - If parameter metric is not string or string is not 'ACE', 'ECE' or 'MCE'\n - If parameter 'feature_names' is set but length does not fit to second dim of X\n - If no ground truth samples are provided\n - If length of bins parameter does not match the number of features given by X\n - If more than 3 feature dimensions (including confidence) are provided\n \"\"\"\n\n # assign deprecated constructor parameter to title_suffix and feature_names\n if hasattr(self, 'title_suffix') and title_suffix is None:\n title_suffix = self.title_suffix\n\n if hasattr(self, 'feature_names') and feature_names is None:\n feature_names = self.feature_names\n\n # check if metric is correct\n if not isinstance(self.metric, str):\n raise AttributeError('Parameter \\'metric\\' must be string with either \\'ece\\', \\'ace\\' or \\'mce\\'.')\n\n # check metrics parameter\n if self.metric.lower() not in ['ece', 'ace', 'mce']:\n raise AttributeError('Parameter \\'metric\\' must be string with either \\'ece\\', \\'ace\\' or \\'mce\\'.')\n else:\n self.metric = self.metric.lower()\n\n # perform checks and prepare input data\n X, matched, sample_uncertainty, bin_bounds, num_features = self._miscalibration.prepare(X, y, batched, uncertainty)\n if num_features > 3:\n raise AttributeError(\"Diagram is not defined for more than 2 additional feature dimensions.\")\n\n histograms = []\n for batch_X, batch_matched, batch_uncertainty, bounds in zip(X, matched, sample_uncertainty, bin_bounds):\n batch_histograms = self._miscalibration.binning(bounds, batch_X, batch_matched, batch_X[:, 0], batch_uncertainty[:, 0])\n histograms.append(batch_histograms[:-1])\n\n # no additional dimensions? compute standard reliability diagram\n if num_features == 1:\n fig1, fig2 = self.__plot_confidence_histogram(X, matched, histograms, bin_bounds, title_suffix)\n return fig1, fig2\n\n # one additional feature? compute 1D-plot\n elif num_features == 2:\n fig = self.__plot_1d(histograms, bin_bounds, title_suffix, feature_names)\n\n # two additional features? compute 2D plot\n elif num_features == 3:\n fig = self.__plot_2d(histograms, bin_bounds, title_suffix, feature_names)\n\n # number of dimensions exceeds 3? 
quit\n else:\n raise AttributeError(\"Diagram is not defined for more than 2 additional feature dimensions.\")\n\n # if tikz is true, create tikz code from matplotlib figure\n if tikz:\n\n # get tikz code for our specific figure and also pass filename to store possible bitmaps\n tikz_fig = tikzplotlib.get_tikz_code(fig, filepath=filename, **save_args)\n\n # close matplotlib figure when tikz figure is requested to save memory\n plt.close(fig)\n fig = tikz_fig\n\n # save figure either as matplotlib PNG or as tikz output file\n if filename is not None:\n if tikz:\n with open(filename, \"w\") as open_file:\n open_file.write(fig)\n else:\n fig.savefig(filename, **save_args)\n\n return fig\n\n @classmethod\n def __interpolate_grid(cls, metric_map: np.ndarray) -> np.ndarray:\n \"\"\" Interpolate missing values in a 2D-grid using the mean of the data. The interpolation is done inplace. \"\"\"\n\n # get all NaNs\n nans = np.isnan(metric_map)\n x = lambda z: z.nonzero()\n\n # get mean of the remaining values and interpolate missing by the mean\n mean = float(np.mean(metric_map[~nans]))\n metric_map[nans] = griddata(x(~nans), metric_map[~nans], x(nans), method='cubic', fill_value=mean)\n return metric_map\n\n def __plot_confidence_histogram(self, X: List[np.ndarray], matched: List[np.ndarray], histograms: List[np.ndarray],\n bin_bounds: List, title_suffix: str = None) -> plt.Figure:\n \"\"\" Plot confidence histogram and reliability diagram to visualize miscalibration for condidences only. \"\"\"\n\n # get number of bins (self.bins has not been processed yet)\n n_bins = len(bin_bounds[0][0])-1\n\n median_confidence = [(bounds[0][1:] + bounds[0][:-1]) * 0.5 for bounds in bin_bounds]\n mean_acc, mean_conf = [], []\n for batch_X, batch_matched, batch_hist, batch_median in zip(X, matched, histograms, median_confidence):\n acc_hist, conf_hist, _, num_samples_hist = batch_hist\n empty_bins, = np.nonzero(num_samples_hist == 0)\n\n # calculate overall mean accuracy and confidence\n mean_acc.append(np.mean(batch_matched))\n mean_conf.append(np.mean(batch_X))\n\n # set empty bins to median bin value\n acc_hist[empty_bins] = batch_median[empty_bins]\n conf_hist[empty_bins] = batch_median[empty_bins]\n\n # convert num_samples to relative afterwards (inplace denoted by [:])\n num_samples_hist[:] = num_samples_hist / np.sum(num_samples_hist)\n\n # import ipdb; ipdb.set_trace()\n # get mean histograms and values over all batches\n acc = np.mean([hist[0] for hist in histograms], axis=0)\n conf = np.mean([hist[1] for hist in histograms], axis=0)\n uncertainty = np.sqrt(np.mean([hist[2] for hist in histograms], axis=0))\n num_samples = np.mean([hist[3] for hist in histograms], axis=0)\n mean_acc = np.mean(mean_acc)\n mean_conf = np.mean(mean_conf)\n median_confidence = np.mean(median_confidence, axis=0)\n bar_width = np.mean([np.diff(bounds[0]) for bounds in bin_bounds], axis=0)\n\n # compute credible interval of uncertainty\n p = 0.05\n z_score = norm.ppf(1. 
- (p / 2))\n uncertainty = z_score * uncertainty\n\n # if no uncertainty is given, set variable uncertainty to None in order to prevent drawing error bars\n if np.count_nonzero(uncertainty) == 0:\n uncertainty = None\n\n # calculate deviation\n deviation = conf - acc\n\n fig1 = plt.figure(\"Reliability {}\".format(title_suffix))\n ax = fig1.add_subplot()\n # set title suffix if given\n # if title_suffix is not None:\n # ax.set_title('Reliability Diagram' + \" - \" + title_suffix)\n # else:\n # ax.set_title('Reliability Diagram')\n \n # create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration\n if self.style == \"bar\":\n # ax.bar(median_confidence, height=median_confidence, width=bar_width, align='center',\n # edgecolor='black', color='pink', alpha=0.6)\n ax.bar(median_confidence, height=acc, width=bar_width, align='center',\n edgecolor='black', yerr=uncertainty, capsize=2)\n # ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',\n # edgecolor='black', color='red', alpha=0.6)\n else:\n ax.plot(median_confidence, acc, color=\"blue\", linestyle=\"-\")\n\n # draw diagonal as perfect calibration line\n ax.plot([0, 1], [0, 1], color='red', linestyle='-.')\n # ax.set_xlim((0.0, 1.0))\n # ax.set_ylim((0.0, 1.0))\n\n # labels and legend of second plot\n # ax.set_xlabel('Confidence')\n # ax.set_ylabel('Accuracy')\n ax.legend(['Output', 'Expected'], fontsize=14)\n\n\n fig2 = plt.figure(\"Conf. Hist.\")\n ax = fig2.add_subplot()\n ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')\n ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='red', linestyle='--')\n ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='blue', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n plt.tight_layout()\n\n return fig1, fig2\n\n # -----------------------------------------\n # plot data distribution histogram first\n fig, axes = plt.subplots(2, squeeze=True, figsize=(7, 6))\n ax = axes[0]\n\n # set title suffix is given\n if title_suffix is not None:\n ax.set_title('Confidence Histogram - ' + title_suffix)\n else:\n ax.set_title('Confidence Histogram')\n\n # create bar chart with relative amount of samples in each bin\n # as well as average confidence and accuracy\n ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')\n ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='black', linestyle='--')\n ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='gray', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n # labels and legend\n ax.set_xlabel('Confidence')\n ax.set_ylabel('% of Samples')\n ax.legend(['Avg. Accuracy', 'Avg. 
Confidence', 'Relative Amount of Samples'])\n\n # second plot: reliability histogram\n ax = axes[1]\n\n # set title suffix if given\n if title_suffix is not None:\n ax.set_title('Reliability Diagram' + \" - \" + title_suffix)\n else:\n ax.set_title('Reliability Diagram')\n\n # create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration\n ax.bar(median_confidence, height=acc, width=bar_width, align='center',\n edgecolor='black', yerr=uncertainty, capsize=4)\n ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',\n edgecolor='black', color='red', alpha=0.6)\n\n # draw diagonal as perfect calibration line\n ax.plot([0, 1], [0, 1], color='red', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n # labels and legend of second plot\n ax.set_xlabel('Confidence')\n ax.set_ylabel('Accuracy')\n ax.legend(['Perfect Calibration', 'Output', 'Gap'])\n\n plt.tight_layout()\n return fig\n\n def __plot_1d(self, histograms: List[np.ndarray], bin_bounds: List,\n title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:\n \"\"\" Plot 1-D miscalibration w.r.t. one additional feature. \"\"\"\n\n # z score for credible interval (if uncertainty is given)\n p = 0.05\n z_score = norm.ppf(1. - (p / 2))\n\n results = []\n for batch_hist, bounds in zip(histograms, bin_bounds):\n result = self._miscalibration.process(self.metric, *batch_hist)\n bin_median = (bounds[-1][:-1] + bounds[-1][1:]) * 0.5\n\n # interpolate missing values\n x = np.linspace(0.0, 1.0, 1000)\n miscalibration = interp1d(bin_median, result[1], kind='cubic', fill_value='extrapolate')(x)\n acc = interp1d(bin_median, result[2], kind='cubic', fill_value='extrapolate')(x)\n conf = interp1d(bin_median, result[3], kind='cubic', fill_value='extrapolate')(x)\n uncertainty = interp1d(bin_median, result[4], kind='cubic', fill_value='extrapolate')(x)\n\n results.append((miscalibration, acc, conf, uncertainty))\n\n # get mean over all batches and convert mean variance to a std deviation afterwards\n miscalibration = np.mean([result[0] for result in results], axis=0)\n acc = np.mean([result[1] for result in results], axis=0)\n conf = np.mean([result[2] for result in results], axis=0)\n uncertainty = np.sqrt(np.mean([result[3] for result in results], axis=0))\n\n # draw routines\n fig, ax1 = plt.subplots()\n conf_color = 'tab:blue'\n\n # set name of the additional feature\n if feature_names is not None:\n ax1.set_xlabel(feature_names[0])\n\n ax1.set_xlim([0.0, 1.0])\n ax1.set_ylim([0.0, 1.0])\n ax1.set_ylabel('accuracy/confidence', color=conf_color)\n\n # draw confidence and accuracy on the same (left) axis\n x = np.linspace(0.0, 1.0, 1000)\n line1, = ax1.plot(x, acc, '-.', color='black')\n line2, = ax1.plot(x, conf, '--', color=conf_color)\n ax1.tick_params('y', labelcolor=conf_color)\n\n # if uncertainty is given, compute average of variances over all bins and get std deviation by sqrt\n # compute credible interval afterwards\n # define lower and upper bound\n uncertainty = z_score * uncertainty\n lb = conf - uncertainty\n ub = conf + uncertainty\n\n # create second axis for miscalibration\n ax11 = ax1.twinx()\n miscal_color = 'tab:red'\n line3, = ax11.plot(x, miscalibration, '-', color=miscal_color)\n\n if self.metric == 'ace':\n ax11.set_ylabel('Average Calibration Error (ACE)', color=miscal_color)\n elif self.metric == 'ece':\n ax11.set_ylabel('Expected Calibration Error (ECE)', color=miscal_color)\n elif self.metric == 'mce':\n 
ax11.set_ylabel('Maximum Calibration Error (MCE)', color=miscal_color)\n\n ax11.tick_params('y', labelcolor=miscal_color)\n\n # set miscalibration limits if given\n if self.fmin is not None and self.fmax is not None:\n ax11.set_ylim([self.fmin, self.fmax])\n\n ax1.legend((line1, line2, line3),\n ('accuracy', 'confidence', '%s' % self.metric.upper()),\n loc='best')\n\n if title_suffix is not None:\n ax1.set_title('Accuracy, confidence and %s\\n- %s -' % (self.metric.upper(), title_suffix))\n else:\n ax1.set_title('Accuracy, confidence and %s' % self.metric.upper())\n\n ax1.grid(True)\n\n fig.tight_layout()\n return fig\n\n def __plot_2d(self, histograms: List[np.ndarray], bin_bounds: List[np.ndarray],\n title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:\n \"\"\" Plot 2D miscalibration reliability diagram heatmap. \"\"\"\n\n results = []\n for batch_hist in histograms:\n result = self._miscalibration.process(self.metric, *batch_hist)\n\n # interpolate 2D data inplace to avoid \"empty\" bins\n batch_samples = result[-1]\n for map in result[1:-1]:\n map[batch_samples == 0.0] = 0.0\n # TODO: check what to do here\n # map[batch_samples == 0.0] = np.nan\n # self.__interpolate_grid(map)\n\n # on interpolation, it is sometimes possible that empty bins have negative values\n # however, this is invalid for variance\n result[4][result[4] < 0] = 0.0\n results.append(result)\n\n # calculate mean over all batches and transpose\n # transpose is necessary. Miscalibration is calculated in the order given by the features\n # however, imshow expects arrays in format [rows, columns] or [height, width]\n # e.g., miscalibration with additional x/y (in this order) will be drawn [y, x] otherwise\n miscalibration = np.mean([result[1] for result in results], axis=0).T\n acc = np.mean([result[2] for result in results], axis=0).T\n conf = np.mean([result[3] for result in results], axis=0).T\n mean = np.mean([result[4] for result in results], axis=0).T\n uncertainty = np.sqrt(mean)\n\n # -----------------------------------------------------------------------------------------\n # draw routines\n\n def set_axis(ax, map, vmin=None, vmax=None):\n \"\"\" Generic function to set all subplots equally \"\"\"\n # TODO: set proper fmin, fmax values\n img = ax.imshow(map, origin='lower', interpolation=\"gaussian\", cmap='jet', aspect=1, vmin=vmin, vmax=vmax)\n\n # set correct x- and y-ticks\n ax.set_xticks(np.linspace(0., len(bin_bounds[0][1])-2, 5))\n ax.set_xticklabels(np.linspace(0., 1., 5))\n ax.set_yticks(np.linspace(0., len(bin_bounds[0][2])-2, 5))\n ax.set_yticklabels(np.linspace(0., 1., 5))\n ax.set_xlim([0.0, len(bin_bounds[0][1])-2])\n ax.set_ylim([0.0, len(bin_bounds[0][2])-2])\n\n # draw feature names on axes if given\n if feature_names is not None:\n ax.set_xlabel(feature_names[0])\n ax.set_ylabel(feature_names[1])\n\n fig.colorbar(img, ax=ax, fraction=0.046, pad=0.04)\n\n return ax, img\n\n # -----------------------------------\n\n # create only two subplots if no additional uncertainty is given\n if np.count_nonzero(uncertainty) == 0:\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))\n\n # process additional uncertainty if given\n else:\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, squeeze=True, figsize=(10, 10))\n ax4, img4 = set_axis(ax4, uncertainty)\n\n if title_suffix is not None:\n ax4.set_title(\"Confidence std deviation\\n- %s -\" % title_suffix)\n else:\n ax4.set_title(\"Confidence std deviation\")\n\n ax1, img1 = set_axis(ax1, acc, vmin=0, vmax=1)\n ax2, img2 = 
set_axis(ax2, conf, vmin=0, vmax=1)\n ax3, img3 = set_axis(ax3, miscalibration, vmin=self.fmin, vmax=self.fmax)\n\n # draw title if given\n if title_suffix is not None:\n ax1.set_title(\"Average accuracy\\n- %s -\" % title_suffix)\n ax2.set_title(\"Average confidence\\n- %s -\" % title_suffix)\n ax3.set_title(\"%s\\n- %s -\" % (self.metric.upper(), title_suffix))\n else:\n ax1.set_title(\"Average accuracy\")\n ax2.set_title(\"Average confidence\")\n ax3.set_title(\"%s\" % self.metric.upper())\n\n # -----------------------------------------------------------------------------------------\n\n return fig"
},
{
"identifier": "to_numpy",
"path": "calibrate/utils/torch_helper.py",
"snippet": "def to_numpy(x: torch.Tensor):\n return x.detach().cpu().numpy()"
}
] | import logging
import numpy as np
import torch
import torch.nn.functional as F
import wandb
from terminaltables import AsciiTable
from torch import nn
from .evaluator import DatasetEvaluator
from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss, OELoss, UELoss
from .reliability_diagram import ReliabilityDiagram
from calibrate.utils.torch_helper import to_numpy | 9,641 |
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
self.num_classes = num_classes
self.num_bins = num_bins
self.device = device
self.reset()
def reset(self) -> None:
self.logits = None
self.labels = None
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
def main_metric(self) -> None:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
def mean_score(self, print=False, all_metric=True):
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss(self.num_bins).to(self.device)
aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
oe_criterion = OELoss(self.num_bins).to(self.device)
ue_criterion = UELoss(self.num_bins).to(self.device)
nll = nll_criterion(self.logits, self.labels).item()
ece = ece_criterion(self.logits, self.labels).item()
aece = aece_criterion(self.logits, self.labels).item()
cece = cece_criterion(self.logits, self.labels).item()
oe = oe_criterion(self.logits, self.labels).item()
ue = ue_criterion(self.logits, self.labels).item()
# metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece, "oe": oe, "ue": ue}
# columns = ["samples", "nll", "ece", "aece", "cece"]
columns = ["samples", "nll", "ece", "aece", "cece", "oe", "ue"]
table_data = [columns]
# table_data.append(
# [
# self.num_samples(),
# "{:.5f}".format(nll),
# "{:.5f}".format(ece),
# "{:.5f}".format(aece),
# "{:.5f}".format(cece),
# ]
# )
table_data.append(
[
self.num_samples(),
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
"{:.5f}".format(oe),
"{:.5f}".format(ue),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def wandb_score_table(self):
_, table_data = self.mean_score(print=False)
return wandb.Table(
columns=table_data[0],
data=table_data[1:]
)
def plot_reliability_diagram(self, title=""):
|
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
self.num_classes = num_classes
self.num_bins = num_bins
self.device = device
self.reset()
def reset(self) -> None:
self.logits = None
self.labels = None
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
def main_metric(self) -> None:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
def mean_score(self, print=False, all_metric=True):
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss(self.num_bins).to(self.device)
aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
oe_criterion = OELoss(self.num_bins).to(self.device)
ue_criterion = UELoss(self.num_bins).to(self.device)
nll = nll_criterion(self.logits, self.labels).item()
ece = ece_criterion(self.logits, self.labels).item()
aece = aece_criterion(self.logits, self.labels).item()
cece = cece_criterion(self.logits, self.labels).item()
oe = oe_criterion(self.logits, self.labels).item()
ue = ue_criterion(self.logits, self.labels).item()
# metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece, "oe": oe, "ue": ue}
# columns = ["samples", "nll", "ece", "aece", "cece"]
columns = ["samples", "nll", "ece", "aece", "cece", "oe", "ue"]
table_data = [columns]
# table_data.append(
# [
# self.num_samples(),
# "{:.5f}".format(nll),
# "{:.5f}".format(ece),
# "{:.5f}".format(aece),
# "{:.5f}".format(cece),
# ]
# )
table_data.append(
[
self.num_samples(),
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
"{:.5f}".format(oe),
"{:.5f}".format(ue),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def wandb_score_table(self):
_, table_data = self.mean_score(print=False)
return wandb.Table(
columns=table_data[0],
data=table_data[1:]
)
def plot_reliability_diagram(self, title=""): | diagram = ReliabilityDiagram(bins=25, style="curve") | 6 | 2023-12-17 13:53:18+00:00 | 12k |
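For reference, the bin-wise expected-calibration-error computation that this row's evaluator accumulates (see the ECELoss snippet in the context column) can be sketched as a standalone function. This is an illustration mirroring that snippet rather than the repository's own API; only torch is assumed, and the random logits/labels at the bottom are a made-up usage example.

# Minimal sketch of the ECE metric: bin predictions by confidence and
# average the |confidence - accuracy| gap weighted by bin occupancy.
import torch
import torch.nn.functional as F


def expected_calibration_error(logits: torch.Tensor, labels: torch.Tensor, n_bins: int = 15) -> float:
    softmaxes = F.softmax(logits, dim=1)
    confidences, predictions = torch.max(softmaxes, dim=1)
    accuracies = predictions.eq(labels).float()

    boundaries = torch.linspace(0, 1, n_bins + 1)
    ece = torch.zeros(1)
    for lower, upper in zip(boundaries[:-1], boundaries[1:]):
        in_bin = (confidences > lower) & (confidences <= upper)
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin.item() > 0:
            acc_in_bin = accuracies[in_bin].mean()
            conf_in_bin = confidences[in_bin].mean()
            ece += torch.abs(conf_in_bin - acc_in_bin) * prop_in_bin
    return ece.item()


if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(256, 10)            # 256 samples, 10 classes
    labels = torch.randint(0, 10, (256,))
    print(f"ECE: {expected_calibration_error(logits, labels):.5f}")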
mjavadpur/Sadtalker_LongVideos | src/face3d/models/facerecon_model.py | [
{
"identifier": "BaseModel",
"path": "src/face3d/models/base_model.py",
"snippet": "class BaseModel(ABC):\n \"\"\"This class is an abstract base class (ABC) for models.\n To create a subclass, you need to implement the following five functions:\n -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).\n -- <set_input>: unpack data from dataset and apply preprocessing.\n -- <forward>: produce intermediate results.\n -- <optimize_parameters>: calculate losses, gradients, and update network weights.\n -- <modify_commandline_options>: (optionally) add model-specific options and set default options.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the BaseModel class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n\n When creating your custom class, you need to implement your own initialization.\n In this fucntion, you should first call <BaseModel.__init__(self, opt)>\n Then, you need to define four lists:\n -- self.loss_names (str list): specify the training losses that you want to plot and save.\n -- self.model_names (str list): specify the images that you want to display and save.\n -- self.visual_names (str list): define networks used in our training.\n -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.\n \"\"\"\n self.opt = opt\n self.isTrain = False\n self.device = torch.device('cpu') \n self.save_dir = \" \" # os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir\n self.loss_names = []\n self.model_names = []\n self.visual_names = []\n self.parallel_names = []\n self.optimizers = []\n self.image_paths = []\n self.metric = 0 # used for learning rate policy 'plateau'\n\n @staticmethod\n def dict_grad_hook_factory(add_func=lambda x: x):\n saved_dict = dict()\n\n def hook_gen(name):\n def grad_hook(grad):\n saved_vals = add_func(grad)\n saved_dict[name] = saved_vals\n return grad_hook\n return hook_gen, saved_dict\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n \"\"\"Add new model-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. 
You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n \"\"\"\n return parser\n\n @abstractmethod\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): includes the data itself and its metadata information.\n \"\"\"\n pass\n\n @abstractmethod\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n pass\n\n @abstractmethod\n def optimize_parameters(self):\n \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n pass\n\n def setup(self, opt):\n \"\"\"Load and print networks; create schedulers\n\n Parameters:\n opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n if self.isTrain:\n self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n \n if not self.isTrain or opt.continue_train:\n load_suffix = opt.epoch\n self.load_networks(load_suffix)\n \n \n # self.print_networks(opt.verbose)\n\n def parallelize(self, convert_sync_batchnorm=True):\n if not self.opt.use_ddp:\n for name in self.parallel_names:\n if isinstance(name, str):\n module = getattr(self, name)\n setattr(self, name, module.to(self.device))\n else:\n for name in self.model_names:\n if isinstance(name, str):\n module = getattr(self, name)\n if convert_sync_batchnorm:\n module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)\n setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device),\n device_ids=[self.device.index], \n find_unused_parameters=True, broadcast_buffers=True))\n \n # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient.\n for name in self.parallel_names:\n if isinstance(name, str) and name not in self.model_names:\n module = getattr(self, name)\n setattr(self, name, module.to(self.device))\n \n # put state_dict of optimizer to gpu device\n if self.opt.phase != 'test':\n if self.opt.continue_train:\n for optim in self.optimizers:\n for state in optim.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.device)\n\n def data_dependent_initialize(self, data):\n pass\n\n def train(self):\n \"\"\"Make models train mode\"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, name)\n net.train()\n\n def eval(self):\n \"\"\"Make models eval mode\"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, name)\n net.eval()\n\n def test(self):\n \"\"\"Forward function used in test time.\n\n This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop\n It also calls <compute_visuals> to produce additional visualization results\n \"\"\"\n with torch.no_grad():\n self.forward()\n self.compute_visuals()\n\n def compute_visuals(self):\n \"\"\"Calculate additional output images for visdom and HTML visualization\"\"\"\n pass\n\n def get_image_paths(self, name='A'):\n \"\"\" Return image paths that are used to load current data\"\"\"\n return self.image_paths if name =='A' else self.image_paths_B\n\n def update_learning_rate(self):\n \"\"\"Update learning rates for all the networks; called at the end of every epoch\"\"\"\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = 
self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)\n\n def get_current_visuals(self):\n \"\"\"Return visualization images. train.py will display these images with visdom, and save the images to a HTML\"\"\"\n visual_ret = OrderedDict()\n for name in self.visual_names:\n if isinstance(name, str):\n visual_ret[name] = getattr(self, name)[:, :3, ...]\n return visual_ret\n\n def get_current_losses(self):\n \"\"\"Return traning losses / errors. train.py will print out these errors on console, and save them to a file\"\"\"\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret\n\n def save_networks(self, epoch):\n \"\"\"Save all the networks to the disk.\n\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n if not os.path.isdir(self.save_dir):\n os.makedirs(self.save_dir)\n\n save_filename = 'epoch_%s.pth' % (epoch)\n save_path = os.path.join(self.save_dir, save_filename)\n \n save_dict = {}\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, name)\n if isinstance(net, torch.nn.DataParallel) or isinstance(net,\n torch.nn.parallel.DistributedDataParallel):\n net = net.module\n save_dict[name] = net.state_dict()\n \n\n for i, optim in enumerate(self.optimizers):\n save_dict['opt_%02d'%i] = optim.state_dict()\n\n for i, sched in enumerate(self.schedulers):\n save_dict['sched_%02d'%i] = sched.state_dict()\n \n torch.save(save_dict, save_path)\n\n def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):\n \"\"\"Fix InstanceNorm checkpoints incompatibility (prior to 0.4)\"\"\"\n key = keys[i]\n if i + 1 == len(keys): # at the end, pointing to a parameter/buffer\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'running_mean' or key == 'running_var'):\n if getattr(module, key) is None:\n state_dict.pop('.'.join(keys))\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'num_batches_tracked'):\n state_dict.pop('.'.join(keys))\n else:\n self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)\n\n def load_networks(self, epoch):\n \"\"\"Load all the networks from the disk.\n\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n if self.opt.isTrain and self.opt.pretrained_name is not None:\n load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)\n else:\n load_dir = self.save_dir \n load_filename = 'epoch_%s.pth' % (epoch)\n load_path = os.path.join(load_dir, load_filename)\n state_dict = torch.load(load_path, map_location=self.device)\n print('loading the model from %s' % load_path)\n\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n net.load_state_dict(state_dict[name])\n \n if self.opt.phase != 'test':\n if self.opt.continue_train:\n print('loading the optim from %s' % load_path)\n for i, optim in enumerate(self.optimizers):\n optim.load_state_dict(state_dict['opt_%02d'%i])\n\n try:\n print('loading the sched from %s' % load_path)\n for i, sched in enumerate(self.schedulers):\n sched.load_state_dict(state_dict['sched_%02d'%i])\n except:\n print('Failed to load schedulers, set schedulers according to epoch count manually')\n for i, sched in 
enumerate(self.schedulers):\n sched.last_epoch = self.opt.epoch_count - 1\n \n\n \n\n def print_networks(self, verbose):\n \"\"\"Print the total number of parameters in the network and (if verbose) network architecture\n\n Parameters:\n verbose (bool) -- if verbose: print the network architecture\n \"\"\"\n print('---------- Networks initialized -------------')\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, name)\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n if verbose:\n print(net)\n print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n print('-----------------------------------------------')\n\n def set_requires_grad(self, nets, requires_grad=False):\n \"\"\"Set requies_grad=Fasle for all the networks to avoid unnecessary computations\n Parameters:\n nets (network list) -- a list of networks\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is not None:\n for param in net.parameters():\n param.requires_grad = requires_grad\n\n def generate_visuals_for_evaluation(self, data, mode):\n return {}"
},
{
"identifier": "networks",
"path": "src/face3d/models/networks.py",
"snippet": "def resize_n_crop(image, M, dsize=112):\ndef filter_state_dict(state_dict, remove_name='fc'):\ndef get_scheduler(optimizer, opt):\n def lambda_rule(epoch):\ndef define_net_recon(net_recon, use_last_fc=False, init_path=None):\ndef define_net_recog(net_recog, pretrained_path=None):\n def __init__(self, net_recon, use_last_fc=False, init_path=None):\n def forward(self, x):\n def __init__(self, net_recog, pretrained_path=None, input_size=112):\n def forward(self, image, M):\ndef conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:\ndef conv1x1(in_planes: int, out_planes: int, stride: int = 1, bias: bool = False) -> nn.Conv2d:\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n def forward(self, x: Tensor) -> Tensor:\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n def forward(self, x: Tensor) -> Tensor:\n def __init__(\n self,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n num_classes: int = 1000,\n zero_init_residual: bool = False,\n use_last_fc: bool = False,\n groups: int = 1,\n width_per_group: int = 64,\n replace_stride_with_dilation: Optional[List[bool]] = None,\n norm_layer: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,\n stride: int = 1, dilate: bool = False) -> nn.Sequential:\n def _forward_impl(self, x: Tensor) -> Tensor:\n def forward(self, x: Tensor) -> Tensor:\ndef _resnet(\n arch: str,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n pretrained: bool,\n progress: bool,\n **kwargs: Any\n) -> ResNet:\ndef resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\ndef wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\nclass ReconNetWrapper(nn.Module):\nclass RecogNetWrapper(nn.Module):\nclass BasicBlock(nn.Module):\nclass Bottleneck(nn.Module):\nclass ResNet(nn.Module):"
},
{
"identifier": "ParametricFaceModel",
"path": "src/face3d/models/bfm.py",
"snippet": "class ParametricFaceModel:\n def __init__(self, \n bfm_folder='./BFM', \n recenter=True,\n camera_distance=10.,\n init_lit=np.array([\n 0.8, 0, 0, 0, 0, 0, 0, 0, 0\n ]),\n focal=1015.,\n center=112.,\n is_train=True,\n default_name='BFM_model_front.mat'):\n \n if not os.path.isfile(os.path.join(bfm_folder, default_name)):\n transferBFM09(bfm_folder)\n \n model = loadmat(os.path.join(bfm_folder, default_name))\n # mean face shape. [3*N,1]\n self.mean_shape = model['meanshape'].astype(np.float32)\n # identity basis. [3*N,80]\n self.id_base = model['idBase'].astype(np.float32)\n # expression basis. [3*N,64]\n self.exp_base = model['exBase'].astype(np.float32)\n # mean face texture. [3*N,1] (0-255)\n self.mean_tex = model['meantex'].astype(np.float32)\n # texture basis. [3*N,80]\n self.tex_base = model['texBase'].astype(np.float32)\n # face indices for each vertex that lies in. starts from 0. [N,8]\n self.point_buf = model['point_buf'].astype(np.int64) - 1\n # vertex indices for each face. starts from 0. [F,3]\n self.face_buf = model['tri'].astype(np.int64) - 1\n # vertex indices for 68 landmarks. starts from 0. [68,1]\n self.keypoints = np.squeeze(model['keypoints']).astype(np.int64) - 1\n\n if is_train:\n # vertex indices for small face region to compute photometric error. starts from 0.\n self.front_mask = np.squeeze(model['frontmask2_idx']).astype(np.int64) - 1\n # vertex indices for each face from small face region. starts from 0. [f,3]\n self.front_face_buf = model['tri_mask2'].astype(np.int64) - 1\n # vertex indices for pre-defined skin region to compute reflectance loss\n self.skin_mask = np.squeeze(model['skinmask'])\n \n if recenter:\n mean_shape = self.mean_shape.reshape([-1, 3])\n mean_shape = mean_shape - np.mean(mean_shape, axis=0, keepdims=True)\n self.mean_shape = mean_shape.reshape([-1, 1])\n\n self.persc_proj = perspective_projection(focal, center)\n self.device = 'cpu'\n self.camera_distance = camera_distance\n self.SH = SH()\n self.init_lit = init_lit.reshape([1, 1, -1]).astype(np.float32)\n \n\n def to(self, device):\n self.device = device\n for key, value in self.__dict__.items():\n if type(value).__module__ == np.__name__:\n setattr(self, key, torch.tensor(value).to(device))\n\n \n def compute_shape(self, id_coeff, exp_coeff):\n \"\"\"\n Return:\n face_shape -- torch.tensor, size (B, N, 3)\n\n Parameters:\n id_coeff -- torch.tensor, size (B, 80), identity coeffs\n exp_coeff -- torch.tensor, size (B, 64), expression coeffs\n \"\"\"\n batch_size = id_coeff.shape[0]\n id_part = torch.einsum('ij,aj->ai', self.id_base, id_coeff)\n exp_part = torch.einsum('ij,aj->ai', self.exp_base, exp_coeff)\n face_shape = id_part + exp_part + self.mean_shape.reshape([1, -1])\n return face_shape.reshape([batch_size, -1, 3])\n \n\n def compute_texture(self, tex_coeff, normalize=True):\n \"\"\"\n Return:\n face_texture -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.)\n\n Parameters:\n tex_coeff -- torch.tensor, size (B, 80)\n \"\"\"\n batch_size = tex_coeff.shape[0]\n face_texture = torch.einsum('ij,aj->ai', self.tex_base, tex_coeff) + self.mean_tex\n if normalize:\n face_texture = face_texture / 255.\n return face_texture.reshape([batch_size, -1, 3])\n\n\n def compute_norm(self, face_shape):\n \"\"\"\n Return:\n vertex_norm -- torch.tensor, size (B, N, 3)\n\n Parameters:\n face_shape -- torch.tensor, size (B, N, 3)\n \"\"\"\n\n v1 = face_shape[:, self.face_buf[:, 0]]\n v2 = face_shape[:, self.face_buf[:, 1]]\n v3 = face_shape[:, self.face_buf[:, 2]]\n e1 = v1 - v2\n e2 
= v2 - v3\n face_norm = torch.cross(e1, e2, dim=-1)\n face_norm = F.normalize(face_norm, dim=-1, p=2)\n face_norm = torch.cat([face_norm, torch.zeros(face_norm.shape[0], 1, 3).to(self.device)], dim=1)\n \n vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2)\n vertex_norm = F.normalize(vertex_norm, dim=-1, p=2)\n return vertex_norm\n\n\n def compute_color(self, face_texture, face_norm, gamma):\n \"\"\"\n Return:\n face_color -- torch.tensor, size (B, N, 3), range (0, 1.)\n\n Parameters:\n face_texture -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.)\n face_norm -- torch.tensor, size (B, N, 3), rotated face normal\n gamma -- torch.tensor, size (B, 27), SH coeffs\n \"\"\"\n batch_size = gamma.shape[0]\n v_num = face_texture.shape[1]\n a, c = self.SH.a, self.SH.c\n gamma = gamma.reshape([batch_size, 3, 9])\n gamma = gamma + self.init_lit\n gamma = gamma.permute(0, 2, 1)\n Y = torch.cat([\n a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device),\n -a[1] * c[1] * face_norm[..., 1:2],\n a[1] * c[1] * face_norm[..., 2:],\n -a[1] * c[1] * face_norm[..., :1],\n a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2],\n -a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:],\n 0.5 * a[2] * c[2] / np.sqrt(3.) * (3 * face_norm[..., 2:] ** 2 - 1),\n -a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:],\n 0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2)\n ], dim=-1)\n r = Y @ gamma[..., :1]\n g = Y @ gamma[..., 1:2]\n b = Y @ gamma[..., 2:]\n face_color = torch.cat([r, g, b], dim=-1) * face_texture\n return face_color\n\n \n def compute_rotation(self, angles):\n \"\"\"\n Return:\n rot -- torch.tensor, size (B, 3, 3) pts @ trans_mat\n\n Parameters:\n angles -- torch.tensor, size (B, 3), radian\n \"\"\"\n\n batch_size = angles.shape[0]\n ones = torch.ones([batch_size, 1]).to(self.device)\n zeros = torch.zeros([batch_size, 1]).to(self.device)\n x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:],\n \n rot_x = torch.cat([\n ones, zeros, zeros,\n zeros, torch.cos(x), -torch.sin(x), \n zeros, torch.sin(x), torch.cos(x)\n ], dim=1).reshape([batch_size, 3, 3])\n \n rot_y = torch.cat([\n torch.cos(y), zeros, torch.sin(y),\n zeros, ones, zeros,\n -torch.sin(y), zeros, torch.cos(y)\n ], dim=1).reshape([batch_size, 3, 3])\n\n rot_z = torch.cat([\n torch.cos(z), -torch.sin(z), zeros,\n torch.sin(z), torch.cos(z), zeros,\n zeros, zeros, ones\n ], dim=1).reshape([batch_size, 3, 3])\n\n rot = rot_z @ rot_y @ rot_x\n return rot.permute(0, 2, 1)\n\n\n def to_camera(self, face_shape):\n face_shape[..., -1] = self.camera_distance - face_shape[..., -1]\n return face_shape\n\n def to_image(self, face_shape):\n \"\"\"\n Return:\n face_proj -- torch.tensor, size (B, N, 2), y direction is opposite to v direction\n\n Parameters:\n face_shape -- torch.tensor, size (B, N, 3)\n \"\"\"\n # to image_plane\n face_proj = face_shape @ self.persc_proj\n face_proj = face_proj[..., :2] / face_proj[..., 2:]\n\n return face_proj\n\n\n def transform(self, face_shape, rot, trans):\n \"\"\"\n Return:\n face_shape -- torch.tensor, size (B, N, 3) pts @ rot + trans\n\n Parameters:\n face_shape -- torch.tensor, size (B, N, 3)\n rot -- torch.tensor, size (B, 3, 3)\n trans -- torch.tensor, size (B, 3)\n \"\"\"\n return face_shape @ rot + trans.unsqueeze(1)\n\n\n def get_landmarks(self, face_proj):\n \"\"\"\n Return:\n face_lms -- torch.tensor, size (B, 68, 2)\n\n Parameters:\n face_proj -- torch.tensor, size (B, N, 2)\n \"\"\" \n return face_proj[:, self.keypoints]\n\n def 
split_coeff(self, coeffs):\n \"\"\"\n Return:\n coeffs_dict -- a dict of torch.tensors\n\n Parameters:\n coeffs -- torch.tensor, size (B, 256)\n \"\"\"\n id_coeffs = coeffs[:, :80]\n exp_coeffs = coeffs[:, 80: 144]\n tex_coeffs = coeffs[:, 144: 224]\n angles = coeffs[:, 224: 227]\n gammas = coeffs[:, 227: 254]\n translations = coeffs[:, 254:]\n return {\n 'id': id_coeffs,\n 'exp': exp_coeffs,\n 'tex': tex_coeffs,\n 'angle': angles,\n 'gamma': gammas,\n 'trans': translations\n }\n def compute_for_render(self, coeffs):\n \"\"\"\n Return:\n face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate\n face_color -- torch.tensor, size (B, N, 3), in RGB order\n landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction\n Parameters:\n coeffs -- torch.tensor, size (B, 257)\n \"\"\"\n coef_dict = self.split_coeff(coeffs)\n face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])\n rotation = self.compute_rotation(coef_dict['angle'])\n\n\n face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])\n face_vertex = self.to_camera(face_shape_transformed)\n \n face_proj = self.to_image(face_vertex)\n landmark = self.get_landmarks(face_proj)\n\n face_texture = self.compute_texture(coef_dict['tex'])\n face_norm = self.compute_norm(face_shape)\n face_norm_roted = face_norm @ rotation\n face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])\n\n return face_vertex, face_texture, face_color, landmark\n\n def compute_for_render_woRotation(self, coeffs):\n \"\"\"\n Return:\n face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate\n face_color -- torch.tensor, size (B, N, 3), in RGB order\n landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction\n Parameters:\n coeffs -- torch.tensor, size (B, 257)\n \"\"\"\n coef_dict = self.split_coeff(coeffs)\n face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])\n #rotation = self.compute_rotation(coef_dict['angle'])\n\n\n #face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])\n face_vertex = self.to_camera(face_shape)\n \n face_proj = self.to_image(face_vertex)\n landmark = self.get_landmarks(face_proj)\n\n face_texture = self.compute_texture(coef_dict['tex'])\n face_norm = self.compute_norm(face_shape)\n face_norm_roted = face_norm # @ rotation\n face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])\n\n return face_vertex, face_texture, face_color, landmark"
},
{
"identifier": "perceptual_loss",
"path": "src/face3d/models/losses.py",
"snippet": "def perceptual_loss(id_featureA, id_featureB):\n cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)\n # assert torch.sum((cosine_d > 1).float()) == 0\n return torch.sum(1 - cosine_d) / cosine_d.shape[0] "
},
{
"identifier": "photo_loss",
"path": "src/face3d/models/losses.py",
"snippet": "def photo_loss(imageA, imageB, mask, eps=1e-6):\n \"\"\"\n l2 norm (with sqrt, to ensure backward stabililty, use eps, otherwise Nan may occur)\n Parameters:\n imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order \n imageB --same as imageA\n \"\"\"\n loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdims=True)) * mask\n loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device))\n return loss"
},
{
"identifier": "reg_loss",
"path": "src/face3d/models/losses.py",
"snippet": "def reg_loss(coeffs_dict, opt=None):\n \"\"\"\n l2 norm without the sqrt, from yu's implementation (mse)\n tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss\n Parameters:\n coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans\n\n \"\"\"\n # coefficient regularization to ensure plausible 3d faces\n if opt:\n w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex\n else:\n w_id, w_exp, w_tex = 1, 1, 1, 1\n creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \\\n w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \\\n w_tex * torch.sum(coeffs_dict['tex'] ** 2)\n creg_loss = creg_loss / coeffs_dict['id'].shape[0]\n\n # gamma regularization to ensure a nearly-monochromatic light\n gamma = coeffs_dict['gamma'].reshape([-1, 3, 9])\n gamma_mean = torch.mean(gamma, dim=1, keepdims=True)\n gamma_loss = torch.mean((gamma - gamma_mean) ** 2)\n\n return creg_loss, gamma_loss"
},
{
"identifier": "reflectance_loss",
"path": "src/face3d/models/losses.py",
"snippet": "def reflectance_loss(texture, mask):\n \"\"\"\n minimize texture variance (mse), albedo regularization to ensure an uniform skin albedo\n Parameters:\n texture --torch.tensor, (B, N, 3)\n mask --torch.tensor, (N), 1 or 0\n\n \"\"\"\n mask = mask.reshape([1, mask.shape[0], 1])\n texture_mean = torch.sum(mask * texture, dim=1, keepdims=True) / torch.sum(mask)\n loss = torch.sum(((texture - texture_mean) * mask)**2) / (texture.shape[0] * torch.sum(mask))\n return loss"
},
{
"identifier": "landmark_loss",
"path": "src/face3d/models/losses.py",
"snippet": "def landmark_loss(predict_lm, gt_lm, weight=None):\n \"\"\"\n weighted mse loss\n Parameters:\n predict_lm --torch.tensor (B, 68, 2)\n gt_lm --torch.tensor (B, 68, 2)\n weight --numpy.array (1, 68)\n \"\"\"\n if not weight:\n weight = np.ones([68])\n weight[28:31] = 20\n weight[-8:] = 20\n weight = np.expand_dims(weight, 0)\n weight = torch.tensor(weight).to(predict_lm.device)\n loss = torch.sum((predict_lm - gt_lm)**2, dim=-1) * weight\n loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1])\n return loss"
},
{
"identifier": "util",
"path": "src/face3d/util/util.py",
"snippet": "def str2bool(v):\ndef copyconf(default_opt, **kwargs):\ndef genvalconf(train_opt, **kwargs):\ndef find_class_in_module(target_cls_name, module):\ndef tensor2im(input_image, imtype=np.uint8):\ndef diagnose_network(net, name='network'):\ndef save_image(image_numpy, image_path, aspect_ratio=1.0):\ndef print_numpy(x, val=True, shp=False):\ndef mkdirs(paths):\ndef mkdir(path):\ndef correct_resize_label(t, size):\ndef correct_resize(t, size, mode=Image.BICUBIC):\ndef draw_landmarks(img, landmark, color='r', step=2):\n _, H, W, _ = img.shape"
},
{
"identifier": "MeshRenderer",
"path": "src/face3d/util/nvdiffrast.py",
"snippet": "class MeshRenderer(nn.Module):\n def __init__(self,\n rasterize_fov,\n znear=0.1,\n zfar=10, \n rasterize_size=224):\n super(MeshRenderer, self).__init__()\n\n # x = np.tan(np.deg2rad(rasterize_fov * 0.5)) * znear\n # self.ndc_proj = torch.tensor(ndc_projection(x=x, n=znear, f=zfar)).matmul(\n # torch.diag(torch.tensor([1., -1, -1, 1])))\n self.rasterize_size = rasterize_size\n self.fov = rasterize_fov\n self.znear = znear\n self.zfar = zfar\n\n self.rasterizer = None\n \n def forward(self, vertex, tri, feat=None):\n \"\"\"\n Return:\n mask -- torch.tensor, size (B, 1, H, W)\n depth -- torch.tensor, size (B, 1, H, W)\n features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None\n\n Parameters:\n vertex -- torch.tensor, size (B, N, 3)\n tri -- torch.tensor, size (B, M, 3) or (M, 3), triangles\n feat(optional) -- torch.tensor, size (B, N ,C), features\n \"\"\"\n device = vertex.device\n rsize = int(self.rasterize_size)\n # ndc_proj = self.ndc_proj.to(device)\n # trans to homogeneous coordinates of 3d vertices, the direction of y is the same as v\n if vertex.shape[-1] == 3:\n vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=-1)\n vertex[..., 0] = -vertex[..., 0]\n\n\n # vertex_ndc = vertex @ ndc_proj.t()\n if self.rasterizer is None:\n self.rasterizer = MeshRasterizer()\n print(\"create rasterizer on device cuda:%d\"%device.index)\n \n # ranges = None\n # if isinstance(tri, List) or len(tri.shape) == 3:\n # vum = vertex_ndc.shape[1]\n # fnum = torch.tensor([f.shape[0] for f in tri]).unsqueeze(1).to(device)\n # fstartidx = torch.cumsum(fnum, dim=0) - fnum\n # ranges = torch.cat([fstartidx, fnum], axis=1).type(torch.int32).cpu()\n # for i in range(tri.shape[0]):\n # tri[i] = tri[i] + i*vum\n # vertex_ndc = torch.cat(vertex_ndc, dim=0)\n # tri = torch.cat(tri, dim=0)\n\n # for range_mode vetex: [B*N, 4], tri: [B*M, 3], for instance_mode vetex: [B, N, 4], tri: [M, 3]\n tri = tri.type(torch.int32).contiguous()\n\n # rasterize\n cameras = FoVPerspectiveCameras(\n device=device,\n fov=self.fov,\n znear=self.znear,\n zfar=self.zfar,\n )\n\n raster_settings = RasterizationSettings(\n image_size=rsize\n )\n\n # print(vertex.shape, tri.shape)\n mesh = Meshes(vertex.contiguous()[...,:3], tri.unsqueeze(0).repeat((vertex.shape[0],1,1)))\n\n fragments = self.rasterizer(mesh, cameras = cameras, raster_settings = raster_settings)\n rast_out = fragments.pix_to_face.squeeze(-1)\n depth = fragments.zbuf\n\n # render depth\n depth = depth.permute(0, 3, 1, 2)\n mask = (rast_out > 0).float().unsqueeze(1)\n depth = mask * depth\n \n\n image = None\n if feat is not None:\n attributes = feat.reshape(-1,3)[mesh.faces_packed()]\n image = pytorch3d.ops.interpolate_face_attributes(fragments.pix_to_face,\n fragments.bary_coords,\n attributes)\n # print(image.shape)\n image = image.squeeze(-2).permute(0, 3, 1, 2)\n image = mask * image\n \n return mask, depth, image"
}
] | import numpy as np
import torch
import trimesh
from src.face3d.models.base_model import BaseModel
from src.face3d.models import networks
from src.face3d.models.bfm import ParametricFaceModel
from src.face3d.models.losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss
from src.face3d.util import util
from src.face3d.util.nvdiffrast import MeshRenderer
from scipy.io import savemat | 10,773 | """This script defines the face reconstruction model for Deep3DFaceRecon_pytorch
"""
# from src.face3d.util.preprocess import estimate_norm_torch
class FaceReconModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=False):
""" Configures options specific for CUT model
"""
# net structure and parameters
parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure')
parser.add_argument('--init_path', type=str, default='./checkpoints/init_model/resnet50-0676ba61.pth')
parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc')
parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/')
parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
# renderer parameters
parser.add_argument('--focal', type=float, default=1015.)
parser.add_argument('--center', type=float, default=112.)
parser.add_argument('--camera_d', type=float, default=10.)
parser.add_argument('--z_near', type=float, default=5.)
parser.add_argument('--z_far', type=float, default=15.)
if is_train:
# training parameters
            parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r34', 'r50'], help='face recog network structure')
parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth')
parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss')
parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face')
# augmentation parameters
parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels')
parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor')
parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree')
# loss weights
parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss')
            parser.add_argument('--w_color', type=float, default=1.92, help='weight for color loss')
parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss')
parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss')
parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss')
parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss')
parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss')
parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss')
parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss')
opt, _ = parser.parse_known_args()
parser.set_defaults(
focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15.
)
if is_train:
parser.set_defaults(
use_crop_face=True, use_predef_M=False
)
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
self.visual_names = ['output_vis']
self.model_names = ['net_recon']
self.parallel_names = self.model_names + ['renderer']
self.facemodel = ParametricFaceModel(
bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center,
is_train=self.isTrain, default_name=opt.bfm_model
)
fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
self.renderer = MeshRenderer(
rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center)
)
if self.isTrain:
self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']
self.net_recog = networks.define_net_recog(
net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
)
# loss func name: (compute_%s_loss) % loss_name
self.compute_feat_loss = perceptual_loss
self.comupte_color_loss = photo_loss
self.compute_lm_loss = landmark_loss
self.compute_reg_loss = reg_loss
| """This script defines the face reconstruction model for Deep3DFaceRecon_pytorch
"""
# from src.face3d.util.preprocess import estimate_norm_torch
class FaceReconModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=False):
""" Configures options specific for CUT model
"""
# net structure and parameters
parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure')
parser.add_argument('--init_path', type=str, default='./checkpoints/init_model/resnet50-0676ba61.pth')
parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc')
parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/')
parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
# renderer parameters
parser.add_argument('--focal', type=float, default=1015.)
parser.add_argument('--center', type=float, default=112.)
parser.add_argument('--camera_d', type=float, default=10.)
parser.add_argument('--z_near', type=float, default=5.)
parser.add_argument('--z_far', type=float, default=15.)
if is_train:
# training parameters
            parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r34', 'r50'], help='face recog network structure')
parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth')
parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss')
parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face')
# augmentation parameters
parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels')
parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor')
parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree')
# loss weights
parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss')
            parser.add_argument('--w_color', type=float, default=1.92, help='weight for color loss')
parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss')
parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss')
parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss')
parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss')
parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss')
parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss')
parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss')
opt, _ = parser.parse_known_args()
parser.set_defaults(
focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15.
)
if is_train:
parser.set_defaults(
use_crop_face=True, use_predef_M=False
)
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
self.visual_names = ['output_vis']
self.model_names = ['net_recon']
self.parallel_names = self.model_names + ['renderer']
self.facemodel = ParametricFaceModel(
bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center,
is_train=self.isTrain, default_name=opt.bfm_model
)
fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
self.renderer = MeshRenderer(
rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center)
)
if self.isTrain:
self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']
self.net_recog = networks.define_net_recog(
net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
)
# loss func name: (compute_%s_loss) % loss_name
self.compute_feat_loss = perceptual_loss
self.comupte_color_loss = photo_loss
self.compute_lm_loss = landmark_loss
self.compute_reg_loss = reg_loss | self.compute_reflc_loss = reflectance_loss | 6 | 2023-12-19 11:01:35+00:00 | 12k |
Angryrou/udao | udao/optimization/tests/moo/test_sequential_progressive_frontier.py | [
{
"identifier": "set_deterministic_torch",
"path": "udao/model/utils/utils.py",
"snippet": "def set_deterministic_torch(seed: int = 0) -> None:\n \"\"\"\n Set seeds and configurations to enable deterministic behavior in PyTorch.\n\n Parameters\n ----------\n seed : int\n Random seed to use.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.use_deterministic_algorithms(True) # type: ignore"
},
{
"identifier": "VarTypes",
"path": "udao/utils/interfaces.py",
"snippet": "class VarTypes(Enum):\n INT = \"int\"\n BOOL = \"bool\"\n CATEGORY = \"category\"\n FLOAT = \"float\""
},
{
"identifier": "MOProblem",
"path": "udao/optimization/concepts/problem.py",
"snippet": "class MOProblem(BaseProblem):\n \"\"\"Multi-objective optimization problem.\"\"\"\n\n def __init__(\n self,\n objectives: Sequence[Objective],\n variables: Dict[str, Variable],\n constraints: Sequence[Constraint],\n data_processor: Optional[DataProcessor] = None,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.objectives = objectives\n super().__init__(\n variables,\n constraints,\n data_processor=data_processor,\n input_parameters=input_parameters,\n )\n\n def __repr__(self) -> str:\n return (\n f\"MOProblem(objectives={self.objectives}, \"\n f\"variables={self.variables}, \"\n f\"constraints={self.constraints}, \"\n f\"input_parameters={self.input_parameters})\"\n )"
},
{
"identifier": "SequentialProgressiveFrontier",
"path": "udao/optimization/moo/progressive_frontier/sequential_progressive_frontier.py",
"snippet": "class SequentialProgressiveFrontier(BaseProgressiveFrontier):\n \"\"\"\n Sequential Progressive Frontier -\n a progressive frontier algorithm that explores the uncertainty space\n sequentially, by dividing the space into subrectangles and\n exploring the subrectangles one by one.\n \"\"\"\n\n @dataclass\n class Params(BaseProgressiveFrontier.Params):\n n_probes: int = 10\n \"\"\"number of probes\"\"\"\n\n def __init__(\n self,\n solver: SOSolver,\n params: Params,\n ) -> None:\n super().__init__(solver, params)\n self.n_probes = params.n_probes\n\n def solve(\n self,\n problem: MOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Solve MOO by Progressive Frontier\n\n Parameters\n ----------\n problem : MOProblem\n MOO problem to be solved\n\n Returns\n -------\n Tuple[np.ndarray | None, np.ndarray | None]\n optimal objectives and variables\n None, None if no solution is found\n \"\"\"\n rectangle_queue: List[Rectangle] = []\n n_objs = len(problem.objectives)\n plans = [\n self.get_anchor_point(problem=problem, obj_ind=i, seed=seed)\n for i in range(n_objs)\n ]\n utopia, nadir = self.get_utopia_and_nadir(plans)\n rectangle = Rectangle(utopia, nadir)\n heapq.heappush(rectangle_queue, rectangle)\n for _ in range(self.n_probes - n_objs):\n if not rectangle_queue:\n logger.info(\"No more uncertainty space to explore further!\")\n break\n rectangle = heapq.heappop(rectangle_queue)\n middle_point, subrectangles = self._find_local_optimum(\n problem, rectangle, seed=seed\n )\n if middle_point is not None:\n plans.append(middle_point)\n for sub_rect in subrectangles:\n if sub_rect.volume != 0:\n heapq.heappush(rectangle_queue, sub_rect)\n\n ## filter dominated points\n po_objs_list = [point.objs.tolist() for point in plans]\n po_vars_list = [point.vars for point in plans]\n po_objs, po_vars = moo_ut.summarize_ret(po_objs_list, po_vars_list)\n\n return po_objs, po_vars\n\n def _find_local_optimum(\n self,\n problem: MOProblem,\n rectangle: Rectangle,\n seed: Optional[int] = None,\n ) -> Tuple[Optional[Point], List[Rectangle]]:\n \"\"\"\n Find the local optimum in the given rectangle and\n the subrectangles in which to continue the search.\n If no optimum is found for the rectangle,\n return None and the upper subrectangles.\n If an optimum is found, return the optimum and\n the subrectangles in which to continue the search.\n\n Parameters\n ----------\n rectangle : Rectangle\n Rectangle in which to find the local optimum\n wl_id : str | None\n workload id\n\n Returns\n -------\n Tuple[Point | None, List[Rectangle]]\n The local optimum and the subrectangles in which to continue the search\n \"\"\"\n current_utopia, current_nadir = Point(rectangle.lower_bounds), Point(\n rectangle.upper_bounds\n )\n logger.debug(f\"current utopia is: {current_utopia}\")\n logger.debug(f\"current nadir is: {current_nadir}\")\n middle_objs = np.array(\n [\n current_nadir.objs[i]\n if i == self.opt_obj_ind\n else (current_utopia.objs[i] + current_nadir.objs[i]) / 2\n for i in range(len(problem.objectives))\n ]\n )\n middle_point = Point(middle_objs)\n obj_bounds_dict = self._form_obj_bounds_dict(\n problem, current_utopia, middle_point\n )\n logger.debug(f\"obj_bounds are: {obj_bounds_dict}\")\n so_problem = self._so_problem_from_bounds_dict(\n problem, obj_bounds_dict, problem.objectives[self.opt_obj_ind]\n )\n try:\n _, soo_vars = self.solver.solve(so_problem, seed=seed)\n except NoSolutionError:\n logger.debug(\n \"This is an empty area \\n \"\n \"don't have pareto 
points, only \"\n \"divide current uncertainty space\"\n )\n middle_point = Point((current_utopia.objs + current_nadir.objs) / 2)\n rectangles = self.generate_sub_rectangles(\n current_utopia, current_nadir, middle_point, successful=False\n )\n return None, rectangles\n else:\n middle_objs = self._compute_objectives(problem, soo_vars)\n middle_point = Point(middle_objs, soo_vars)\n return middle_point, self.generate_sub_rectangles(\n current_utopia, current_nadir, middle_point\n )\n\n def generate_sub_rectangles(\n self, utopia: Point, nadir: Point, middle: Point, successful: bool = True\n ) -> List[Rectangle]:\n \"\"\"\n\n Generate uncertainty space to be explored:\n - if starting from successful optimum as middle, excludes the dominated\n space (middle as utopia and nadir as nadir)\n - if starting from unsuccessful optimum as middle, excludes the space where\n all constraining objectives are lower than the middle point.\n\n Parameters\n ----------\n utopia: Point\n the utopia point\n nadir: Point\n the nadir point\n middle: Point\n the middle point generated by\n the constrained single objective optimization\n successful: bool\n whether the middle point is from a successful optimization\n\n Returns\n -------\n List[Rectangle]\n sub rectangles to be explored\n \"\"\"\n\n rectangles = []\n corner_points = self._get_corner_points(utopia, nadir)\n for point in corner_points:\n # space explored (lower half of constraining objectives)\n is_explored_unconclusive = not successful and np.all(\n middle.objs[1:] - point.objs[1:] > 0\n )\n # nadir point\n is_dominated = successful and np.all(middle.objs - point.objs < 0)\n if is_dominated or is_explored_unconclusive:\n continue\n sub_rect_u, sub_rect_n = self.get_utopia_and_nadir([point, middle])\n rectangles.append(Rectangle(sub_rect_u, sub_rect_n))\n\n return rectangles\n\n def _get_corner_points(self, utopia: Point, nadir: Point) -> List[Point]:\n \"\"\"\n get the corner points that can form a hyper_rectangle\n from utopia and nadir points.\n\n Parameters\n ----------\n utopia: Points (defined by class), the utopia point\n nadir: Points (defined by class), the nadir point\n\n Returns\n -------\n List[Point]\n 2^n_objs corner points\n \"\"\"\n n_objs = utopia.n_objs\n u_obj_values, n_obj_values = utopia.objs.reshape(\n [n_objs, 1]\n ), nadir.objs.reshape([n_objs, 1])\n grids_list = np.hstack([u_obj_values, n_obj_values])\n\n ## generate cartesian product of grids_list\n objs_corner_points = np.array([list(i) for i in itertools.product(*grids_list)])\n corner_points = [Point(objs=obj_values) for obj_values in objs_corner_points]\n\n return corner_points"
},
{
"identifier": "MOGD",
"path": "udao/optimization/soo/mogd.py",
"snippet": "class MOGD(SOSolver):\n \"\"\"MOGD solver for single-objective optimization.\n\n Performs gradient descent on input variables by minimizing an\n objective loss and a constraint loss.\n \"\"\"\n\n @dataclass\n class Params:\n learning_rate: float\n \"\"\"learning rate of Adam optimizer applied to input variables\"\"\"\n max_iters: int\n \"\"\"maximum number of iterations for a single local search\"\"\"\n patience: int\n \"\"\"maximum number of iterations without improvement\"\"\"\n multistart: int\n \"\"\"number of random starts for gradient descent\"\"\"\n objective_stress: float = 10.0\n \"\"\"stress term for objective functions\"\"\"\n constraint_stress: float = 1e5\n \"\"\"stress term for constraint functions\"\"\"\n strict_rounding: bool = False\n \"\"\"whether strictly rounding integer variables at each iteration. \"\"\"\n batch_size: int = 1\n \"\"\"batch size for gradient descent\"\"\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n dtype: th.dtype = th.float32\n \"\"\"type of the tensors\"\"\"\n\n def __init__(self, params: Params) -> None:\n super().__init__()\n self.lr = params.learning_rate\n self.max_iter = params.max_iters\n self.patience = params.patience\n self.multistart = params.multistart\n self.objective_stress = params.objective_stress\n self.constraint_stress = params.constraint_stress\n self.strict_rounding = params.strict_rounding\n self.batch_size = params.batch_size\n self.device = params.device\n self.dtype = params.dtype\n\n def _get_unprocessed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[Dict[str, th.Tensor], Dict[str, Any]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables for which to get random values\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[Dict[str, th.Tensor], Dict[str, Any]]\n - random values as a tensor for each numeric variable\n - input parameters valuies\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n return derive_unprocessed_input(\n input_variables=numeric_values,\n input_parameters=input_parameters,\n device=self.device,\n )\n\n def _get_processed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[UdaoInput, UdaoItemShape, Callable[[th.Tensor], TabularContainer]]:\n \"\"\"Get random values for numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInputShape, Callable[[th.Tensor], TabularContainer]]\n - random values for numeric variables\n - shape of the input\n - function to convert a tensor to a 
TabularContainer\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n input_data, iterator = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n make_tabular_container = cast(\n UdaoIterator, iterator\n ).get_tabular_features_container\n\n input_data_shape = iterator.shape\n\n return (\n input_data,\n input_data_shape,\n make_tabular_container,\n )\n\n def _get_unprocessed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n ) -> Tuple[Dict[str, float], Dict[str, float]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Variables for which to get bounds\n\n Returns\n -------\n Tuple[Dict[str, float], Dict[str, float]]\n - lower bounds of numeric variables\n - upper bounds of numeric variables\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n return lower_numeric_values, upper_numeric_values\n\n def _get_processed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> Tuple[UdaoInput, UdaoInput]:\n \"\"\"Get bounds of numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Input parameters, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInput]\n Lower and upper bounds of numeric\n variables in the form of a UdaoInput\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n lower_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=lower_numeric_values,\n )\n upper_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=upper_numeric_values,\n )\n if self.device:\n return lower_input.to(self.device), upper_input.to(self.device)\n else:\n return lower_input, upper_input\n\n def _gradient_descent(\n self,\n problem: co.SOProblem,\n input_data: Union[UdaoInput, Dict],\n optimizer: th.optim.Optimizer,\n ) -> Tuple[int, float, float]:\n \"\"\"Perform a gradient descent step on input variables\n\n Parameters\n ----------\n problem : co.SOProblem\n Single-objective optimization problem\n input_data : Union[UdaoInput, Dict]\n Input data - can have different types depending on whether\n the input variables are processed or not.\n - UdaoInput: the naive input\n - Dict: {\"input_variables\": ..., \"input_parameters\": ...}\n\n optimizer : th.optim.Optimizer\n PyTorch optimizer\n\n Returns\n -------\n Tuple[int, float, float]\n - index of minimum loss\n - minimum loss\n - objective value at minimum loss\n\n Raises\n ------\n UncompliantSolutionError\n If no solution within bounds is found\n \"\"\"\n # Compute objective, 
constraints and corresponding losses\n\n loss_meta = self._compute_loss(problem, input_data)\n sum_loss = loss_meta[\"sum_loss\"]\n min_loss = loss_meta[\"min_loss\"]\n min_loss_id = loss_meta[\"min_loss_id\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n\n optimizer.zero_grad()\n sum_loss.backward() # type: ignore\n optimizer.step()\n\n if is_within_constraint and (\n self.within_objective_bounds(best_obj, problem.objective)\n ):\n return min_loss_id, min_loss, best_obj\n else:\n raise UncompliantSolutionError(\"No solution within bounds found!\")\n\n def _log_success(\n self,\n problem: co.SOProblem,\n iter: int,\n best_obj: float,\n best_iter: int,\n best_feature_input: Any,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, best local {problem.objective.name} \"\n f\"found {best_obj:.5f}\"\n f\" \\nat iteration {best_iter},\"\n f\" \\nwith vars: {best_feature_input}, for \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _log_failure(\n self,\n problem: co.SOProblem,\n iter: int,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, no valid {problem.objective.name}\"\n f\" found for input parameters {problem.input_parameters} with \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _unprocessed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n no data processor is defined.\n The input variables are transformed to a dictionary of tensors and are\n optimized directly, by being passed to the objective function along\n with the input parameters.\n \"\"\"\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[Dict[str, th.Tensor]] = None\n\n (\n input_variable_values,\n input_parameter_values,\n ) = self._get_unprocessed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n lower_input, upper_input = self._get_unprocessed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables)\n )\n for name in input_variable_values:\n input_variable_values[name].requires_grad_(True)\n optimizer = optim.Adam([t for t in input_variable_values.values()], lr=self.lr)\n i = 0\n while i < self.max_iter:\n with th.no_grad():\n input_variable_values_backup = {\n k: v.detach().clone() for k, v in input_variable_values.items()\n }\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n {\n \"input_variables\": input_variable_values,\n \"input_parameters\": input_parameter_values,\n },\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = {\n k: v[min_loss_id].reshape(1, -1)\n for k, v in input_variable_values_backup.items()\n }\n best_iter = i\n\n with th.no_grad():\n # Update input_variable_values with constrained values\n for k in input_variable_values:\n input_variable_values[k].data = th.clip(\n input_variable_values[k].data,\n lower_input[k],\n upper_input[k],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n for k in input_variable_values:\n if isinstance(problem.variables[k], co.IntegerVariable):\n input_variable_values[k].data = input_variable_values[\n k\n 
].data.round()\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n if not self.strict_rounding:\n for k in best_feature_input:\n if isinstance(problem.variables[k], co.IntegerVariable):\n best_feature_input[k].data = best_feature_input[k].data.round()\n loss_meta = self._compute_loss(\n problem,\n {\n \"input_variables\": best_feature_input,\n \"input_parameters\": input_parameter_values,\n },\n )\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n\n best_raw_vars = {\n name: best_feature_input[name]\n .cpu()\n .numpy()\n .squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _processed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n a data processor is defined.\n\n input variables and parameters are processed by the data processor.\n Gradient descent is performed on the processed input variables.\n Variables are then inverse transformed to get the raw variables.\n \"\"\"\n if not problem.data_processor:\n raise Exception(\"Data processor is not defined!\")\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[th.Tensor] = None\n # Random numeric variables and their characteristics\n (\n input_data,\n input_data_shape,\n make_tabular_container,\n ) = self._get_processed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n # Bounds of numeric variables\n lower_input, upper_input = self._get_processed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n )\n # Indices of numeric variables on which to apply gradients\n mask = th.tensor(\n [i in problem.variables for i in input_data_shape.feature_names],\n device=self.device,\n )\n grad_indices = th.nonzero(mask, as_tuple=False).squeeze()\n input_vars_subvector = input_data.features[:, grad_indices].clone().detach()\n input_vars_subvector.requires_grad_(True)\n\n optimizer = optim.Adam([input_vars_subvector], lr=self.lr)\n i = 0\n while i < self.max_iter:\n input_data.features = input_data.features.clone().detach()\n input_data.features[:, grad_indices] = input_vars_subvector\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n input_data,\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = (\n input_data.features.detach()[min_loss_id].clone().reshape(1, -1)\n )\n best_iter = i\n\n with th.no_grad():\n # Update input_vars_subvector with constrained values\n input_vars_subvector.data = th.clip(\n input_vars_subvector.data,\n # Use .data to avoid gradient tracking during update\n 
lower_input.features[0, grad_indices],\n upper_input.features[0, grad_indices],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n input_data.features[:, grad_indices] = input_vars_subvector.data\n feature_container = make_tabular_container(\n input_data.features.detach()\n )\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n numeric_values: Dict[str, np.ndarray] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n input_vars_subvector.data = input_data_raw.features[:, grad_indices]\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n with th.no_grad():\n best_feature_input = cast(th.Tensor, best_feature_input)\n feature_container = make_tabular_container(best_feature_input)\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n if not self.strict_rounding:\n best_raw_vars: Dict[str, Any] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_best_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=best_raw_vars,\n device=self.device,\n )\n loss_meta = self._compute_loss(problem, input_data_best_raw)\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n else:\n best_raw_vars = {\n name: best_raw_df[[name]]\n .values.squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization.\n Categorical variables are fixed to the values in input_parameters.\n (a grid search of categorical variables is performed in solve)\n This is where gradient descent is performed.\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n objective : co.Objective\n Objective to be optimized\n constraints : Sequence[co.Constraint]\n Constraints to be satisfied\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed: int, by default None\n random seed\n\n Returns\n -------\n Tuple[float, Dict[str, float], flat]\n - objective value\n - variables\n - best loss value\n\n Raises\n ------\n NoSolutionError\n No valid solution is found\n \"\"\"\n\n if not problem.data_processor:\n return self._unprocessed_single_start_opt(problem, seed=seed)\n else:\n 
return self._processed_single_start_opt(problem, seed=seed)\n\n def solve(\n self, problem: co.SOProblem, seed: Optional[int] = None\n ) -> Tuple[float, Dict[str, float]]:\n if seed is not None:\n th.manual_seed(seed)\n if self.device:\n for constraint in problem.constraints:\n constraint.to(self.device)\n problem.objective.to(self.device)\n\n categorical_variables = [\n name\n for name, variable in problem.variables.items()\n if isinstance(variable, co.EnumVariable)\n ]\n numeric_variables = {\n name: variable\n for name, variable in problem.variables.items()\n if isinstance(variable, co.NumericVariable)\n }\n\n meshed_categorical_vars = self.get_meshed_categorical_vars(problem.variables)\n\n if meshed_categorical_vars is None:\n meshed_categorical_vars = np.array([0])\n\n best_loss_list: List[float] = []\n obj_list: List[float] = []\n vars_list: List[Dict] = []\n for i in range(self.multistart):\n for categorical_cell in meshed_categorical_vars:\n categorical_values = {\n name: categorical_cell[ind]\n for ind, name in enumerate(categorical_variables)\n } # from {id: value} to {name: value}\n fixed_values = {\n **categorical_values,\n **(problem.input_parameters or {}),\n }\n try:\n (\n obj_pred,\n best_raw_vars,\n best_loss,\n ) = self._single_start_opt(\n co.SOProblem(\n variables=numeric_variables, # type: ignore\n input_parameters=fixed_values,\n objective=problem.objective,\n constraints=problem.constraints or [],\n data_processor=problem.data_processor,\n ),\n seed=seed + i if seed is not None else None,\n )\n except NoSolutionError:\n continue\n else:\n best_loss_list.append(best_loss)\n obj_list.append(obj_pred)\n vars_list.append(best_raw_vars)\n if not obj_list:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n idx = np.argmin(best_loss_list)\n vars_cand = vars_list[idx]\n if vars_cand is not None:\n obj_cand = obj_list[idx]\n if obj_cand is None:\n raise Exception(f\"Unexpected objs_list[{idx}] is None.\")\n else:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n return obj_cand, vars_cand\n\n ##################\n ## _loss ##\n ##################\n def constraints_loss(\n self, constraint_values: List[th.Tensor], constraints: Sequence[co.Constraint]\n ) -> th.Tensor:\n \"\"\"\n compute loss of the values of each constraint function fixme: double-check\n\n Parameters\n ----------\n constraint_values : List[th.Tensor]\n values of each constraint function\n constraints : Sequence[co.Constraint]\n constraint functions\n\n Returns\n -------\n th.Tensor\n loss of the values of each constraint function\n\n \"\"\"\n\n # vars: a tensor\n # get loss for constraint functions defined in the problem setting\n total_loss = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n for i, (constraint_value, constraint) in enumerate(\n zip(constraint_values, constraints)\n ):\n stress = (\n self.objective_stress\n if isinstance(constraint, co.Objective)\n else self.constraint_stress\n )\n constraint_violation = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n if constraint.upper is not None and constraint.lower is not None:\n if constraint.upper == constraint.lower:\n constraint_violation = th.abs(constraint_value - constraint.upper)\n else:\n normed_constraint = (constraint_value - constraint.lower) / (\n constraint.upper - constraint.lower\n )\n constraint_violation = th.where(\n (normed_constraint < 0) | (normed_constraint > 1),\n (normed_constraint - 0.5),\n 0,\n )\n elif constraint.lower 
is not None:\n constraint_violation = th.relu(constraint.lower - constraint_value)\n elif constraint.upper is not None:\n constraint_violation = th.relu(constraint_value - constraint.upper)\n total_loss += (\n constraint_violation**2 + stress * (constraint_violation > 0).float()\n )\n\n return total_loss\n\n def objective_loss(\n self, objective_value: th.Tensor, objective: co.Objective\n ) -> th.Tensor:\n \"\"\"Compute the objective loss for a given objective value:\n - if no bounds are specified, use the squared objective value\n - if both bounds are specified, use the squared normalized\n objective value if it is within the bounds, otherwise\n add a stress term to a squared distance to middle of the bounds\n\n Parameters\n ----------\n objective_value : th.Tensor\n Tensor of objective values\n objective : co.Objective\n Objective function\n\n Returns\n -------\n th.Tensor\n Tensor of objective losses\n\n Raises\n ------\n NotImplementedError\n If only one bound is specified for the objective\n\n \"\"\"\n\n if objective.upper is None and objective.lower is None:\n loss = (\n th.sign(objective_value) * (objective_value**2) * objective.direction\n )\n elif objective.upper is not None and objective.lower is not None:\n norm_cst_obj_pred = (objective_value - objective.lower) / (\n objective.upper - objective.lower\n ) # scaled\n loss = th.where(\n (norm_cst_obj_pred < 0) | (norm_cst_obj_pred > 1),\n (norm_cst_obj_pred - 0.5) ** 2 + self.objective_stress,\n norm_cst_obj_pred * objective.direction,\n )\n else:\n raise NotImplementedError(\"Objective with only one bound is not supported\")\n return loss\n\n def _obj_forward(\n self,\n optimization_element: co.Constraint,\n input_data: Union[UdaoInput, Dict],\n ) -> th.Tensor:\n if isinstance(input_data, UdaoInput):\n return optimization_element.function(input_data) # type: ignore\n else:\n # Dict when unprocessed inputs\n return optimization_element.function(**input_data)\n\n def _compute_loss(\n self, problem: co.SOProblem, input_data: Union[UdaoInput, Dict]\n ) -> Dict[str, Any]:\n obj_output = self._obj_forward(problem.objective, input_data)\n objective_loss = self.objective_loss(obj_output, problem.objective)\n constraint_loss = th.zeros_like(objective_loss, device=self.device)\n\n if problem.constraints:\n const_outputs = [\n self._obj_forward(constraint, input_data)\n for constraint in problem.constraints\n ]\n constraint_loss = self.constraints_loss(const_outputs, problem.constraints)\n\n loss = objective_loss + constraint_loss\n min_loss_id = int(th.argmin(loss).cpu().item())\n\n return {\n \"sum_loss\": th.sum(loss),\n \"min_loss\": th.min(loss).cpu().item(),\n \"min_loss_id\": min_loss_id,\n \"best_obj\": obj_output[min_loss_id].cpu().item(),\n \"is_within_constraint\": bool((constraint_loss[min_loss_id] == 0).item()),\n }\n\n ##################\n ## _get (vars) ##\n ##################\n\n def get_meshed_categorical_vars(\n self, variables: Dict[str, co.Variable]\n ) -> Optional[np.ndarray]:\n \"\"\"\n Get combinations of all categorical (binary, enum) variables\n\n Parameters\n ----------\n variables : Dict[str, co.Variable]\n Variables to be optimized\n\n Returns\n -------\n Optional[np.ndarray]\n Combinations of all categorical variables\n of shape (n_samples, n_vars)\n \"\"\"\n cv_value_list = [\n variable.values\n for variable in variables.values()\n if isinstance(variable, co.EnumVariable)\n ]\n if not cv_value_list:\n return None\n meshed_cv_value_list = [x_.reshape(-1, 1) for x_ in np.meshgrid(*cv_value_list)]\n 
meshed_cv_value = np.concatenate(meshed_cv_value_list, axis=1)\n return meshed_cv_value\n\n ##################\n ## _check ##\n ##################\n\n @staticmethod\n def within_objective_bounds(obj_value: float, objective: co.Objective) -> bool:\n \"\"\"\n check whether violating the objective value var_ranges\n :param pred_dict: dict, keys are objective names,\n values are objective values\n :param obj_bounds: dict, keys are objective names,\n values are lower and upper var_ranges of each objective value\n :return: True or False\n \"\"\"\n within_bounds = True\n if objective.upper is not None:\n within_bounds = obj_value <= objective.upper\n if objective.lower is not None:\n within_bounds = within_bounds and obj_value >= objective.lower\n return within_bounds"
},
{
"identifier": "Point",
"path": "udao/optimization/utils/moo_utils.py",
"snippet": "class Point:\n def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:\n \"\"\"\n A point in the objective space.\n Variables are optional, and are not specified for imaginary points\n (e.g., utopia and nadir)\n\n Parameters\n ----------\n objs : np.ndarray\n Array of objective values of shape (n_objs,)\n vars :np.ndarray, optional\n Array of variable values of shape (n_vars,), by default None\n \"\"\"\n self.objs = objs\n self.vars = vars\n self.n_objs = objs.shape[0]\n\n def __repr__(self) -> str:\n return f\"Point(objs={self.objs}, vars={self.vars})\"\n\n def __eq__(self, other: \"Point\") -> bool: # type: ignore\n return bool(np.all(self.objs == other.objs) and np.all(self.vars == other.vars))"
}
] | from typing import cast
from ....model.utils.utils import set_deterministic_torch
from ....utils.interfaces import VarTypes
from ...concepts.problem import MOProblem
from ...moo.progressive_frontier import SequentialProgressiveFrontier
from ...soo.mogd import MOGD
from ...utils.moo_utils import Point
import numpy as np
import pytest
import torch as th | 10,570 |
@pytest.fixture
def spf(mogd: MOGD) -> SequentialProgressiveFrontier:
spf = SequentialProgressiveFrontier(
params=SequentialProgressiveFrontier.Params(),
solver=mogd,
)
return spf
class TestProgressiveFrontier:
def test__get_corner_points(self, spf: SequentialProgressiveFrontier) -> None:
utopia = Point(np.array([1, 0.3]))
nadir = Point(np.array([5, 10]))
corner_points = spf._get_corner_points(utopia, nadir)
# 1-------3#
# #
# 0-------2#
expected_points = [
Point(np.array([1.0, 0.3])),
Point(np.array([1.0, 10.0])),
Point(np.array([5.0, 0.3])),
Point(np.array([5.0, 10.0])),
]
assert all(c == e for c, e in zip(corner_points, expected_points))
def test__generate_sub_rectangles_bad(
self, spf: SequentialProgressiveFrontier
) -> None:
utopia = Point(np.array([1, 0.3]))
nadir = Point(np.array([5, 10]))
middle = Point((utopia.objs + nadir.objs) / 2)
rectangles = spf.generate_sub_rectangles(
utopia, nadir, middle, successful=False
)
############
# 0 | 1 #
############
# - | - #
############
assert len(rectangles) == 2
assert rectangles[0].utopia == Point(np.array([1.0, 5.15]))
assert rectangles[0].nadir == Point(np.array([3.0, 10]))
assert rectangles[1].utopia == Point(np.array([3.0, 5.15]))
assert rectangles[1].nadir == Point(np.array([5.0, 10]))
def test__generate_sub_rectangles_good(
self, spf: SequentialProgressiveFrontier
) -> None:
utopia = Point(np.array([1, 0.3]))
nadir = Point(np.array([5, 10]))
middle = Point((utopia.objs + nadir.objs) / 2)
rectangles = spf.generate_sub_rectangles(utopia, nadir, middle)
############
# 1 | _ #
############
# 0 | 2 #
############
assert len(rectangles) == 3
assert rectangles[0].utopia == Point(np.array([1.0, 0.3]))
assert rectangles[0].nadir == Point(np.array([3.0, 5.15]))
assert rectangles[1].utopia == Point(np.array([1.0, 5.15]))
assert rectangles[1].nadir == Point(np.array([3.0, 10.0]))
assert rectangles[2].utopia == Point(np.array([3.0, 0.3]))
assert rectangles[2].nadir == Point(np.array([5.0, 5.15]))
def test_get_utopia_and_nadir(self, spf: SequentialProgressiveFrontier) -> None:
points = [
Point(np.array([1, 5]), {"v1": 0.2, "v2": 1}),
Point(np.array([3, 10]), {"v1": 0.8, "v2": 6}),
Point(np.array([5, 0.3]), {"v1": 0.5, "v2": 3}),
]
utopia, nadir = spf.get_utopia_and_nadir(points)
np.testing.assert_array_equal(utopia.objs, np.array([1, 0.3]))
np.testing.assert_array_equal(nadir.objs, np.array([5, 10]))
def test_solve(
self,
spf: SequentialProgressiveFrontier,
|
@pytest.fixture
def spf(mogd: MOGD) -> SequentialProgressiveFrontier:
spf = SequentialProgressiveFrontier(
params=SequentialProgressiveFrontier.Params(),
solver=mogd,
)
return spf
class TestProgressiveFrontier:
def test__get_corner_points(self, spf: SequentialProgressiveFrontier) -> None:
utopia = Point(np.array([1, 0.3]))
nadir = Point(np.array([5, 10]))
corner_points = spf._get_corner_points(utopia, nadir)
# 1-------3#
# #
# 0-------2#
expected_points = [
Point(np.array([1.0, 0.3])),
Point(np.array([1.0, 10.0])),
Point(np.array([5.0, 0.3])),
Point(np.array([5.0, 10.0])),
]
assert all(c == e for c, e in zip(corner_points, expected_points))
def test__generate_sub_rectangles_bad(
self, spf: SequentialProgressiveFrontier
) -> None:
utopia = Point(np.array([1, 0.3]))
nadir = Point(np.array([5, 10]))
middle = Point((utopia.objs + nadir.objs) / 2)
rectangles = spf.generate_sub_rectangles(
utopia, nadir, middle, successful=False
)
############
# 0 | 1 #
############
# - | - #
############
assert len(rectangles) == 2
assert rectangles[0].utopia == Point(np.array([1.0, 5.15]))
assert rectangles[0].nadir == Point(np.array([3.0, 10]))
assert rectangles[1].utopia == Point(np.array([3.0, 5.15]))
assert rectangles[1].nadir == Point(np.array([5.0, 10]))
def test__generate_sub_rectangles_good(
self, spf: SequentialProgressiveFrontier
) -> None:
utopia = Point(np.array([1, 0.3]))
nadir = Point(np.array([5, 10]))
middle = Point((utopia.objs + nadir.objs) / 2)
rectangles = spf.generate_sub_rectangles(utopia, nadir, middle)
############
# 1 | _ #
############
# 0 | 2 #
############
assert len(rectangles) == 3
assert rectangles[0].utopia == Point(np.array([1.0, 0.3]))
assert rectangles[0].nadir == Point(np.array([3.0, 5.15]))
assert rectangles[1].utopia == Point(np.array([1.0, 5.15]))
assert rectangles[1].nadir == Point(np.array([3.0, 10.0]))
assert rectangles[2].utopia == Point(np.array([3.0, 0.3]))
assert rectangles[2].nadir == Point(np.array([5.0, 5.15]))
def test_get_utopia_and_nadir(self, spf: SequentialProgressiveFrontier) -> None:
points = [
Point(np.array([1, 5]), {"v1": 0.2, "v2": 1}),
Point(np.array([3, 10]), {"v1": 0.8, "v2": 6}),
Point(np.array([5, 0.3]), {"v1": 0.5, "v2": 3}),
]
utopia, nadir = spf.get_utopia_and_nadir(points)
np.testing.assert_array_equal(utopia.objs, np.array([1, 0.3]))
np.testing.assert_array_equal(nadir.objs, np.array([5, 10]))
def test_solve(
self,
spf: SequentialProgressiveFrontier, | two_obj_problem: MOProblem, | 2 | 2023-12-20 09:10:42+00:00 | 12k |
XLearning-SCU/2023-TPAMI-SMILE | Net.py | [
{
"identifier": "get_dist_release",
"path": "DistComput.py",
"snippet": "def get_dist_release(loader, dist_path):\r\n if not os.path.exists(dist_path):\r\n # loader = test_loader\r\n num_data = [10]\r\n with torch.no_grad():\r\n dist_list = [[] for i in range(len(num_data))]\r\n for j, data_t in enumerate(loader, 0):\r\n # get all inputs\r\n fea0, fea1, class_labels0, class_labels1, mask, is_pair, idx = data_t\r\n inputs_t = fea0.cuda()\r\n # inputs_t = torch.cat([fea0,fea1]).cuda()\r\n # labels_t = torch.cat([class_labels0,class_labels1]).cuda()\r\n # inputs_t, _, labels_t, _ = data_t\r\n # inputs_t, labels_t = inputs_t.cuda(), labels_t.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n inputs_t = fea1.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n for ii in range(len(num_data)):\r\n DirectoryOperator(dist_path).make_fold()\r\n np.savetxt(dist_path, np.array(dist_list[ii]))\r\n\r\n dist = torch.from_numpy(\r\n np.loadtxt(\r\n dist_path\r\n ).astype(np.float32)\r\n )\r\n return dist\r"
},
{
"identifier": "get_nearest_k",
"path": "_Utils/Calculator.py",
"snippet": "def get_nearest_k(h0, h1, k=1, sp_size=1000):\r\n hh0 = h0.half()\r\n hh1 = h1.half()\r\n split = int(np.ceil(len(hh0) / sp_size))\r\n near = []\r\n for i in range(split):\r\n dist = torch.cdist(hh0[i * sp_size:(i + 1) * sp_size], hh1)\r\n nearest = torch.argsort(dist, dim=1)[:, :k]\r\n near.append(nearest)\r\n nearest = torch.cat(near)\r\n return nearest\r"
},
{
"identifier": "update_log",
"path": "_Utils/Logs.py",
"snippet": "def update_log(dic, path='../log/res.csv'):\r\n index = 'Epoch'\r\n val = []\r\n name = []\r\n for na, v in dic.items():\r\n val.append(v)\r\n name.append(na)\r\n dt = pd.DataFrame([val], columns=name)\r\n dt = dt.set_index(index)\r\n if os.path.exists(path):\r\n dt_old = pd.read_csv(path, index_col=index)\r\n dt = merge_csv(dt_old, dt)\r\n DirectoryOperator(path).make_fold()\r\n dt.to_csv(path)\r"
},
{
"identifier": "visualize2",
"path": "_Utils/Scatter.py",
"snippet": "def visualize2(feature_vec, type_vec, group_vec, pred_vec, prefix, ):\r\n fv = feature_vec.reshape((len(feature_vec), -1))\r\n for perplexity in []:# 50\r\n vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(\r\n np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)\r\n )\r\n for s in [5]:\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea_multi,\r\n fig_path='{}Multi.svg'.format(prefix2),\r\n label_color=type_vec[group_vec == 0],\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n\r\n for perplexity in [50]:\r\n vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)\r\n for s in [5]: # 5\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.svg'.format(prefix2),\r\n label_color=type_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n # visualize_scatter(vis_fea,\r\n # fig_path='{}Cluster.svg'.format(prefix),\r\n # label_color=pred_vec,\r\n # label_shape=type_vec,\r\n #\r\n # )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.svg'.format(prefix2),\r\n label_color=group_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r"
},
{
"identifier": "visualize",
"path": "_Utils/Visualize.py",
"snippet": "def visualize(feature_vec, type_vec, group_vec, pred_vec, prefix='../Visualization/E{:03d}'.format(0)):\r\n vis_fea = tsne(feature_vec)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.jpg'.format(prefix),\r\n label_color=type_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Cluster.jpg'.format(prefix),\r\n label_color=pred_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.jpg'.format(prefix),\r\n label_color=group_vec,\r\n label_shape=type_vec,\r\n )\r"
},
{
"identifier": "visual_matrix_console",
"path": "_Utils/Visualize.py",
"snippet": "def visual_matrix_console(x):\r\n if len(x.shape) <= 2:\r\n x = x.reshape((*x.shape, 1))\r\n base_wid = int(np.log10(np.max(x) + 0.5)) + 1\r\n head_wid = x.shape[2] * (1 + base_wid)\r\n head_sep = int(head_wid // 2) + 1\r\n print('t\\\\c ', end='')\r\n for i in range(x.shape[1]):\r\n print(('{:' + '{}'.format(head_sep) + 'd}').format(i), end=' ' * (head_wid - head_sep))\r\n print()\r\n for i, line in enumerate(x):\r\n print('{:2d}: '.format(i), end='')\r\n for cl in line:\r\n sg = True\r\n for g in cl:\r\n if sg:\r\n sg = False\r\n else:\r\n print(' ', end='')\r\n if g != 0:\r\n # print('base_wid == {}'.format(base_wid))\r\n # print('g == {}'.format(g))\r\n print(('{:' + str(base_wid) + 'd}').format(g), end='')\r\n else:\r\n print(' ' * base_wid, end='')\r\n print('|', end='')\r\n print()\r"
},
{
"identifier": "visualize_image",
"path": "_Utils/Visualize.py",
"snippet": "def visualize_image(x, verbose=0, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param show:\r\n :param fig_path:\r\n :param x:\r\n (row, line, pic_h, pic_w) or (row, line, pic_h, pic_w, pic_c), pic_c = 1,3,4\r\n :return:\r\n \"\"\"\r\n x = np.asarray(x)\r\n if verbose:\r\n print('img.min() == {}'.format(np.min(x)))\r\n print('img.max() == {}'.format(np.max(x)))\r\n x -= np.min(x)\r\n x /= np.max(x)\r\n row, line = x.shape[:2]\r\n w, h = x.shape[1] * x.shape[3] / 90, x.shape[0] * x.shape[2] / 90\r\n plt.figure(figsize=(w, h)) # w, h\r\n count = 0\r\n for rx in x:\r\n for image in rx:\r\n count += 1\r\n plt.subplot(row, line, count)\r\n plt.imshow(image, cmap='gray', )\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n plt.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0.1 / h, wspace=0.1 / w)\r\n\r\n if not show and fig_path is None:\r\n fig_path = '../_fig/fig.jpg'\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r"
},
{
"identifier": "plot_heat_map",
"path": "_Utils/Visualize.py",
"snippet": "def plot_heat_map(z, xticks=None, yticks=None, xlabel=None, ylabel=None, title=None, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param z: z[i,j] shown in i-th row, j-th line\r\n :param xlabel:\r\n :param ylabel:\r\n :param show:\r\n :param fig_path:\r\n :return:\r\n \"\"\"\r\n left = 0.15\r\n right = 1\r\n top = 0.95\r\n bottom = 0.15\r\n w, h = z.shape\r\n plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n\r\n # plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n # plt.subplots_adjust(left=left, right=right, top=top, bottom=bottom)\r\n\r\n if xticks is not None:\r\n plt.xticks(np.arange(len(xticks)), np.round(xticks, 2), rotation=45)\r\n if yticks is not None:\r\n plt.yticks(np.arange(len(yticks)), np.round(yticks, 2))\r\n for i in range(z.shape[0]):\r\n for j in range(z.shape[1]):\r\n # plt.text(j, i, accs[i, j].round(2), ha=\"center\", va=\"center\", color=\"b\", fontsize=12,\r\n # fontname='Times New Roman')\r\n plt.text(j, i, z[i, j], ha=\"center\", va=\"center\")\r\n\r\n if xlabel is not None:\r\n plt.xlabel(xlabel)\r\n if ylabel is not None:\r\n plt.ylabel(ylabel)\r\n if title is not None:\r\n plt.title(title)\r\n plt.imshow(z, interpolation='nearest', aspect='auto')\r\n\r\n plt.colorbar()\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r"
},
{
"identifier": "TimeOperator",
"path": "_Utils/TimeOperator.py",
"snippet": "class TimeOperator:\r\n def __init__(self):\r\n self.time_buffer = None\r\n self.time_record = 0\r\n self.time_sum = 0\r\n self.time_count = 0\r\n\r\n def time(self, output=False, promt=''):\r\n if self.time_buffer is None:\r\n self.time_buffer = time()\r\n else:\r\n self.time_record = time() - self.time_buffer\r\n self.time_buffer = None\r\n self.time_sum += self.time_record\r\n self.time_count += 1\r\n if output:\r\n print('{}Time == {:7.05f}'.format(promt, self.time_record))\r\n\r\n def get_time_sum(self):\r\n return self.time_sum\r\n\r\n def show_time_sum(self):\r\n print('{:.02f}'.format(self.get_time_sum()))\r\n\r\n def get_fps(self):\r\n return self.time_count / self.time_sum\r\n\r\n def __get_speed(self, to_metric=None):\r\n speed = self.get_fps()\r\n metric = 'Second'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Minute'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Hour'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 24\r\n metric = 'Day'\r\n return speed, metric\r\n\r\n def show_process(self, process_now, process_total, name='Epoch'):\r\n if self.time_sum <= 0:\r\n return\r\n speed = self.time_sum / self.time_count\r\n print('{:<5s} [{:3.0f}/{:3.0f}] [{:8.02f}/{:8.02f}]: {:5.02f}({:5.02f}) '.format(\r\n name, process_now, process_total,\r\n process_now * speed, process_total * speed,\r\n self.time_record, speed\r\n ))\r\n\r\n def show_speed(self):\r\n speed, metric = self.__get_speed()\r\n print('{:4.01f} Frames/{}'.format(speed, metric))\r"
},
{
"identifier": "DirectoryOperator",
"path": "_Utils/DirectoryOperator.py",
"snippet": "class DirectoryOperator:\r\n def __init__(self, directory: str):\r\n self.directory = directory\r\n\r\n def make_fold(self):\r\n if not TestMode:\r\n # print('mk dir {}'.format(os.path.dirname(self.directory)))\r\n os.makedirs(os.path.dirname(self.directory), exist_ok=True)\r\n\r\n def modification_time(self):\r\n if os.path.exists(self.directory):\r\n return os.path.getmtime(self.directory)\r\n else:\r\n warnings.warn('Time_now is returned since the modification time for non-exist file is not available. File: {}'.format(self.directory))\r\n return time.time()\r"
},
{
"identifier": "get_clusters",
"path": "DataSetMaster/dataset.py",
"snippet": "def get_clusters(args):\n item_path = os.path.join(path_operator.get_checkpoint_path(level=1), 'Items0321')\n file_mnist_test = os.path.join(item_path, 'mnist_test_clusters89.67.txt')\n file_mnist_train = os.path.join(item_path, 'MnistTrain94.31B256.txt')\n file_amazon = os.path.join(item_path, 'amazon72.81B032ReValue.txt')\n file_webcam = os.path.join(item_path, 'webcamOurLoaderRevalveBatchWiseB032_84.03.txt')\n file_usps = os.path.join(item_path, 'usps_train_clusters85.10.txt')\n root_har = os.path.join(item_path, 'HAR')\n root_mtfl = os.path.join(item_path, 'MTFL')\n\n if args.dataset == 'MNISTUSPS': # 87.75 93.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_usps,\n ]\n elif args.dataset == 'ReverseMNIST': # 89.67 94.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_mnist,\n ]\n elif args.dataset == 'Office': # 75.28\n file_list = [\n file_amazon,\n file_webcam,\n ]\n elif args.dataset == 'MTFL':\n file_list = np.sort([os.path.join(root_mtfl, f) for f in os.listdir(root_mtfl) if f.endswith('txt')])\n elif args.dataset == 'HAR': # 81.70\n file_list = np.sort([os.path.join(root_har, f) for f in os.listdir(root_har) if f.endswith('txt')])\n else:\n raise NotImplementedError(\"\")\n\n def debug(x):\n print(x.shape)\n return x\n\n clusters = torch.cat(\n [debug(torch.from_numpy(np.loadtxt(c).astype(np.float32)).long()) for c in file_list],\n dim=0,\n ).cuda()\n return clusters"
},
{
"identifier": "svm_classify",
"path": "classification.py",
"snippet": "def svm_classify(data, data_gt, label, test_prop, C):\n \"\"\"\n trains a linear SVM on the data\n input C specifies the penalty factor of SVM\n \"\"\"\n seed = random.randint(0, 1000)\n train_idx, test_idx = TT_split(data.shape[1], test_prop, seed)\n train_data = np.concatenate([data[0][train_idx], data[1][train_idx]], axis=1)\n test_data = np.concatenate([data_gt[0][test_idx], data_gt[1][test_idx]], axis=1)\n test_label = label[test_idx]\n train_label = label[train_idx]\n\n # print('training SVM...')\n clf = svm.LinearSVC(C=C, dual=False)\n clf.fit(train_data, train_label.ravel())\n\n p = clf.predict(test_data)\n test_acc = accuracy_score(test_label, p)\n\n return test_acc"
},
{
"identifier": "UMAP",
"path": "evaluate.py",
"snippet": "def UMAP(feature_vec, type_vec, group_vec, pred_vec, n_type, n_batch, args, epoch, dst_root='../Visualization'):\n t = time.time()\n # print(\"Performing UMAP Visualization...\")\n # print('feature_vec.shape == {}'.format(feature_vec.shape))\n sc.set_figure_params(figsize=(4, 4), dpi=300)\n\n # type_vec = pd.DataFrame(type_vec)\n # for key in cell_type_dict.keys():\n # type_vec.replace(key, cell_type_dict[key], inplace=True)\n # group_vec = pd.DataFrame(group_vec)\n # for key in batch_dict.keys():\n # batch_vec.replace(key, batch_dict[key], inplace=True)\n\n adata = sc.AnnData(feature_vec)\n # print('adata.shape == {}'.format(adata.shape))\n sc.pp.neighbors(adata)\n adata.obs['cluster'] = pd.DataFrame(pred_vec).values.astype(np.str_)\n adata.obs['type'] = pd.DataFrame(type_vec).values.astype(np.str_)\n adata.obs['group'] = pd.DataFrame(group_vec).values.astype(np.str_)\n\n sc.tl.umap(adata)\n sc.pl.umap(adata,\n color=['cluster'],\n palette=sns.color_palette(\"husl\", n_type),\n save='E{:03d}UmapCluster{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['type'],\n palette=sns.color_palette(\"husl\", n_type),\n save='E{:03d}UmapType{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['group'],\n palette=sns.color_palette(\"hls\", n_batch),\n save='E{:03d}UmapGroup{}.png'.format(epoch, str(args.dataset)),\n show=False)\n roott = './figures/'\n for root, dirs, files in os.walk(roott):\n # print(root)\n # print(dirs)\n # print(files)\n for f in files:\n # print(os.path.join('../Visualization', f))\n FileOperator(\n os.path.join(root, f)\n ).rename(\n os.path.join(dst_root, f.replace('umapE', 'E')),\n auto_rename=False\n )\n if PrintTimer:\n print('VisualizeScatter finished with in {:.03f} seconds (x.shape == {}).'.format(\n time.time() - t,\n feature_vec.shape,\n ))"
},
{
"identifier": "evaluate2",
"path": "evaluate.py",
"snippet": "def evaluate2(feature_vec, pred_vec, type_vec, group_vec):\n nmi, ari, acc, pred_adjusted = cluster_metrics(type_vec, pred_vec)\n gs = np.unique(group_vec)\n ts = np.unique(type_vec)\n class_num = len(ts)\n group_num = len(gs)\n if group_vec is not None and group_num > 1:\n balance, entro = my_balance(pred_vec, group_vec, cluster_num=np.unique(type_vec).shape[0],\n group_num=np.unique(group_vec).shape[0])\n O = torch.zeros((class_num, group_num)).cuda()\n\n for b in gs:\n ind_g = b == group_vec\n pred_vec_g = pred_vec[ind_g]\n for t in ts:\n O[t, b] = np.sum(pred_vec_g == t)\n O += 1e-6\n O = (O / torch.sum(O))\n NmiFair = normalized_mutual_information(O).cpu().numpy()\n Fmeasure = FMeasure(beta=1)(acc, NmiFair)\n else:\n balance, entro = 0, 0\n NmiFair = 0\n Fmeasure = 0\n entro_v = np.mean(entro)\n global BestAcc, BestAri, BestNmi, BestBalance, BestEntropy, BestFairness, BestNmiFair, BestFmeasure\n if BestAcc < acc:\n BestAcc = acc\n if BestAri < ari:\n BestAri = ari\n if BestNmi < nmi:\n BestNmi = nmi\n if BestBalance < balance:\n BestBalance = balance\n # if BestFairness < fairness:\n # BestFairness = fairness\n if BestNmiFair < NmiFair:\n BestNmiFair = NmiFair\n if BestFmeasure < Fmeasure:\n BestFmeasure = Fmeasure\n if BestEntropy < entro_v:\n BestEntropy = entro_v\n\n print(\n 'NMI={:5.02f}|{:5.02f}, ARI={:5.02f}|{:5.02f}, ACC={:5.02f}|{:5.02f}, Balance={:5.02f}|{:5.02f}, NmiFair={:5.02f}|{:5.02f}, Fmeasure={:5.02f}|{:5.02f}, Entropy={:5.02f}|{:5.02f}[{}],'.format(\n nmi * 100, BestNmi * 100,\n ari * 100, BestAri * 100,\n acc * 100, BestAcc * 100,\n balance * 100, BestBalance * 100,\n # fairness * 100, BestFairness * 100,\n NmiFair * 100, BestNmiFair * 100,\n Fmeasure * 100, BestFmeasure * 100,\n entro_v, BestEntropy, entro\n )\n )\n met = {\n 'nmi' : nmi,\n 'ari' : ari,\n 'acc' : acc,\n 'balance' : balance,\n 'NmiFair' : NmiFair,\n 'Fmeasure': Fmeasure,\n }\n return pred_adjusted, met\n # tqdm.write('NMI=%.4f, ACC=%.4f, ARI=%.4f' % (nmi, acc, ari), end='')\n # if fair_metric:\n # kl, ari_b = fair_metrics(feature_vec, group_vec, pred_vec, type_vec)\n # print(', KL=%.4f, ARI_b=%.4f' % (kl, ari_b), end='')\n # tqdm.write('')"
},
{
"identifier": "visual_image_scatter",
"path": "figures/ScatterMaster.py",
"snippet": "def visual_image_scatter():\r\n np_path = os.path.join(\r\n 'D:/VirtualMachine/Codes/230904/SMAIL_RunSet_Visual/ --QuickConfig C100 --VisualFreq 5 --VisualRandom 1 --dataset NoisyMNIST30000 --seed 1999 --train_epoch 100/Checkpoints/Epoch099.npz')\r\n # np_path_row = os.path.join(root, np_paths[np_names.index(np_tag)], 'NpPoints', np_epoch)\r\n\r\n data = np.load(np_path, allow_pickle=False)\r\n data_vec = data['data_vec']\r\n feature_vec = data['feature_vec']\r\n group_vec = data['group_vec']\r\n type_vec = data['type_vec']\r\n\r\n # visualize_image(x=[\r\n # [it.reshape([28, 28]) for it in data_vec[:10]],\r\n # [it.reshape([28, 28]) for it in data_vec[10:20]],\r\n # [it.reshape([28, 28]) for it in data_vec[20:30]],\r\n # ], show=True)\r\n\r\n DrawMax = 3000\r\n if len(feature_vec) > DrawMax:\r\n it = np.arange(len(feature_vec))\r\n np.random.shuffle(it)\r\n ind = it[:DrawMax]\r\n feature_vec = feature_vec[ind]\r\n type_vec = type_vec[ind]\r\n group_vec = group_vec[ind]\r\n data_vec = data_vec[ind]\r\n vis_fea = TSNE(perplexity=50).fit_transform(feature_vec)\r\n\r\n _, ax = plt.subplots(figsize=(5 * 1 * 2, 5 * 1 * 2 / 1.6))\r\n\r\n label_color = np.unique(type_vec)\r\n color_num = len(np.unique(type_vec))\r\n # if color_num <= 2:\r\n # cmap = None\r\n if color_num <= 10:\r\n cmap = 'tab10'\r\n elif color_num <= 20:\r\n cmap = 'tab20'\r\n else:\r\n cmap = 'gist_ncar'\r\n for digit in np.unique(type_vec):\r\n ax.scatter(\r\n *vis_fea[type_vec == digit].T,\r\n # marker=f\"${digit}$\",\r\n s=0.5,\r\n # color=plt.cm.Dark2(digit),\r\n alpha=0.7,\r\n c=type_vec[type_vec == digit],\r\n cmap=cmap,\r\n vmax=max(4, np.max(label_color)),\r\n vmin=min(0, np.min(label_color)),\r\n zorder=2,\r\n )\r\n w = int(np.sqrt(len(data_vec[0])))\r\n h = w\r\n shown_images = np.array([[1.0, 1.0]]) # just something big\r\n for i in range(data_vec.shape[0]):\r\n # plot every digit on the embedding\r\n # show an annotation box for a group of digits\r\n dist = np.sum((vis_fea[i] - shown_images) ** 2, 1)\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n shown_images = np.concatenate([shown_images, [vis_fea[i]]], axis=0)\r\n # img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, )\r\n img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, zoom=0.5)\r\n # img.ti\r\n imagebox = offsetbox.AnnotationBbox(\r\n img, # [w, h, 3]\r\n vis_fea[i],\r\n pad=0,\r\n frameon=False\r\n )\r\n imagebox.set(zorder=1)\r\n ax.add_artist(imagebox)\r\n\r\n ax.set_title('title')\r\n ax.axis(\"off\")\r\n plt.tight_layout()\r\n plt.savefig('D:/Pengxin/Temp/tmp.pdf')\r\n plt.show()\r\n\r\n print()\r\n pass\r"
}
] | import math
import os
import time
import warnings
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
import evaluate
import faiss
import scipy.io as sio
from torch import nn
from torch.autograd import Variable
from DistComput import get_dist_release
from _Utils.Calculator import get_nearest_k
from _Utils.Logs import update_log
from _Utils.Scatter import visualize2
from _Utils.Visualize import visualize, visual_matrix_console, visualize_image, plot_heat_map
from _Utils import TimeOperator, DirectoryOperator
from DataSetMaster.dataset import get_clusters
from classification import svm_classify
from evaluate import UMAP, evaluate2
from sklearn import metrics
from munkres import Munkres
from figures.ScatterMaster import visual_image_scatter
| 10,783 | # class_labels1[is_pair == 0] = class_labels1[is_pair == 0][nearest[:, 0]]
elif args.reAlign == 'Copy':
if torch.sum(to_realign):
h1[to_realign] = h0[to_realign]
# class_labels1[is_pair == 0] = class_labels0[is_pair == 0]
elif args.reAlign == 'KnnMapMean':
if torch.sum(to_realign):
targ_v1 = h1[is_pair]
nearest = get_nearest_k(h0[to_realign], h0[is_pair], args.reAlignK)
h1[to_realign] = torch.cat([torch.mean(targ_v1[ns], dim=0) for ns in nearest])
# class_labels1[is_pair == 0] = ...
elif args.reAlign == 'Ignore':
pass
else:
raise NotImplementedError('')
if args.Rev:
fea0_rec, fea1_rec = self.decode([h1, h0])
else:
fea0_rec, fea1_rec = self.decode([h0, h1])
# if len(fea0_rec[0]) == len(fea1_rec[0]):
# fea_rec = torch.concat([fea0_rec, fea1_rec])
# fea = torch.concat([fea0, fea1])
# mask_c = torch.concat([mask[:, 0], mask[:, 1]])
# if torch.sum(mask_c == 0):
# rnmse_vec[0].extend(
# evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 0], xs=fea[mask_c == 0]).cpu().numpy())
# if torch.sum(mask_c == 1):
# rnmse_vec[1].extend(
# evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 1], xs=fea[mask_c == 1]).cpu().numpy())
# else:
# if torch.sum(mask == 0):
# n0_v0 = evaluate.get_rnmse(
# xs_hat=fea0_rec[mask[:, 0] == 0], xs=fea0[mask[:, 0] == 0]).cpu().numpy()
# n0_v1 = evaluate.get_rnmse(
# xs_hat=fea1_rec[mask[:, 1] == 0], xs=fea1[mask[:, 1] == 0]).cpu().numpy()
# rnmse_vec[0].extend(n0_v0)
# rnmse_vec[0].extend(n0_v1)
# if torch.sum(mask == 1):
# n1_v0 = evaluate.get_rnmse(
# xs_hat=fea0_rec[mask[:, 0] == 1], xs=fea0[mask[:, 0] == 1]).cpu().numpy()
# n1_v1 = evaluate.get_rnmse(
# xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy()
# rnmse_vec[1].extend(n1_v0)
# rnmse_vec[1].extend(n1_v1)
g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int),
torch.ones(len(fea1), device=fea0.device, dtype=torch.int)))
h = torch.cat([h0, h1]).detach().cpu().numpy()
feature_vec.extend(h)
data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy())
group_vec.extend(g.cpu().numpy())
type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy())
inf_data_t = time.time()
feature_vec = np.array(feature_vec)
data_vec = np.array(data_vec)
feature_vec_cluster = np.array(feature_vec_cluster)
is_pair_all = np.array(is_pair_all)
feature_vec_classification = np.array(feature_vec_classification)
group_vec = np.array(group_vec)
group_vec_cluster = np.array(group_vec_cluster)
type_vec = np.array(type_vec)
type_vec_cluster = np.array(type_vec_cluster)
rnmse_vec[0] = np.array(rnmse_vec[0])
rnmse_vec[1] = np.array(rnmse_vec[1])
kmeans_time = TimeOperator.Timer()
if args.ShowReconstruct:
if args.dataset == 'MNISTUSPS':
dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets]
data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets]
Y = test_dataloader.dataset.datasets[0].targets
else:
dims = [d.shape[1] for d in test_dataloader.dataset.data]
data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data]
Y = test_dataloader.dataset.class_labels0
mask = test_dataloader.dataset.mask
n_per_cat = 10
rec0, rec1 = self.decode([
torch.from_numpy(feature_vec[group_vec == 0]).cuda(),
torch.from_numpy(feature_vec[group_vec == 1]).cuda()])
rec0 = rec0.detach().cpu().numpy()
rec1 = rec1.detach().cpu().numpy()
show_img = np.asarray([])
inds_map = np.asarray([])
for v in range(2):
col = np.asarray([])
inds_map_col = np.asarray([])
for y in range(10):
inds = np.arange(len(Y))[
np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y)
]
np.random.shuffle(inds)
assert len(inds) >= n_per_cat
inds = inds[:n_per_cat]
raw_imgs = data_list[v][inds]
missing_imgs = data_list[1 - v][inds]
rec_imgs = [rec0, rec1][v][inds]
rec_imgs_miss = [rec0, rec1][1 - v][inds]
pack = np.asarray(
[raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28])
if len(col):
col = np.concatenate([col, pack], axis=0)
else:
col = pack
if len(inds_map_col):
inds_map_col = np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0)
else:
inds_map_col = inds.reshape([1, -1])
if len(show_img):
show_img = np.concatenate([show_img, col], axis=1)
else:
show_img = col
if len(inds_map):
inds_map = np.concatenate([inds_map, inds_map_col], axis=1)
else:
inds_map = inds_map_col
|
def show_distribution_ct(type_vec, group_vec, pred_vec, class_num, group_num):
v = np.zeros((class_num, class_num, group_num), dtype=int)
for t, c, g in zip(type_vec, pred_vec, group_vec):
v[t, c, g] += 1
visual_matrix_console(x=v)
def kmeans(feature_vec, class_num):
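    # Run k-means with faiss on the GPU (float16 flat L2 index) and return the
    # (class_num x d) matrix of cluster centroids.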
d = feature_vec.shape[1]
kmeans = faiss.Clustering(d, class_num)
kmeans.verbose = False
kmeans.niter = 300
kmeans.nredo = 10
# kmeans.spherical = True
# if LimitKmeans:
# kmeans.max_points_per_centroid = 1000
# kmeans.min_points_per_centroid = 10
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = True
cfg.device = 0
index = faiss.GpuIndexFlatL2(res, d, cfg)
# print(feature_vec.shape)
kmeans.train(feature_vec, index)
centroids = faiss.vector_to_array(kmeans.centroids).reshape(class_num, d)
return centroids
def show_distribution(cluster_vec, group_vec, class_num, group_num):
for it in np.arange(group_num):
print('{:4d}, '.format(it), end='')
print('')
cluster_group = torch.zeros((class_num, group_num), dtype=torch.int)
for i, j in zip(cluster_vec, group_vec):
cluster_group[i, j] += 1
# cluster_group = cluster_group[torch.argsort(torch.sum(cluster_group, dim=1))]
for line in cluster_group:
print('{:4d}: '.format(torch.sum(line)), end='')
for it in line:
print('{:4d}, '.format(it), end='')
print('')
def save_checkpoint(state, epoch):
"""
it has been trained for *epoch* epochs
"""
filename = 'Epoch{:03d}.checkpoint'.format(epoch)
checkpoint_dir = os.path.join(
os.path.dirname(os.getcwd()),
'Checkpoints',
filename
)
DirectoryOperator.FoldOperator(directory=checkpoint_dir).make_fold()
if os.path.exists(checkpoint_dir):
warnings.warn('Checkpoint exist and been replaced.({})'.format(checkpoint_dir))
print('Save check point into {}'.format(checkpoint_dir))
torch.save(state, checkpoint_dir)
def get_ffn(dims, last_layers=None, with_bn=False, drop_out=0):
layers = []
for ind in range(len(dims) - 1):
in_dim = dims[ind]
out_dim = dims[ind + 1]
layers.append(nn.Linear(in_dim, out_dim))
if with_bn:
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
if drop_out:
layers.append(nn.Dropout(drop_out))
if last_layers is not None:
layers.extend(last_layers)
return nn.Sequential(*layers)
def get_cov(dims, strides, last_layers=None, with_bn=False, drop_out=0):
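    # Build a convolutional stack: a non-negative stride adds a Conv2d layer, while a
    # negative stride adds an up-sampling ConvTranspose2d with stride |stride|.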
layers = []
for ind in range(len(dims) - 1):
in_dim = dims[ind]
out_dim = dims[ind + 1]
stride = strides[ind]
# layers.append(nn.Linear(in_dim, out_dim))
if stride >= 0:
layers.append(nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1))
else:
layers.append(nn.ConvTranspose2d(
in_dim, out_dim, kernel_size=3, stride=-stride, padding=1, output_padding=0 if stride == -1 else 1))
if with_bn:
# layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.BatchNorm2d(out_dim))
layers.append(nn.ReLU())
if drop_out:
layers.append(nn.Dropout(drop_out))
if last_layers is not None:
layers.extend(last_layers)
return nn.Sequential(*layers)
class Net(nn.Module):
def __init__(self, args, in_dims, class_num, group_num):
super(Net, self).__init__()
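        # args.BatchNormType and args.GroupWiseLayer are strings of '0'/'1' flags: the i-th
        # character toggles batch normalization / view-specific (group-wise) weights for the
        # i-th stage of the encoder-decoder defined below.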
self.encoder_adaption = nn.ModuleList([
get_ffn([in_dims[i], 1024], with_bn=args.BatchNormType[0] == '1', drop_out=args.Dropout)
for i in range(group_num if args.GroupWiseLayer[0] == '1' else 1)])
self.encoder = nn.ModuleList([
get_ffn([1024, 1024, 512], with_bn=args.BatchNormType[1] == '1', drop_out=args.Dropout)
for _ in range(group_num if args.GroupWiseLayer[1] == '1' else 1)])
if args.representation_dim == 0:
args.representation_dim = class_num
self.class_num = class_num
self.group_num = group_num
self.pred_cac = None
self.pred_center_cac = None
if args.ElActivationType == 'None':
el_activation_ = []
elif args.ElActivationType == 'Normalize':
el_activation_ = []
elif args.ElActivationType == 'BnNormalize':
el_activation_ = [nn.BatchNorm1d(args.representation_dim)]
elif args.ElActivationType == 'BnReNormalize':
el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()]
elif args.ElActivationType == 'BnRe':
el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()]
else:
raise NotImplementedError('')
self.el_activation_ = el_activation_
self.encoder_linear = nn.ModuleList([
get_ffn([512, 256], with_bn=args.BatchNormType[2] == '1', drop_out=args.Dropout,
last_layers=[nn.Linear(256, args.representation_dim)] + self.el_activation_)
for _ in range(group_num if args.GroupWiseLayer[2] == '1' else 1)])
dec_in = args.representation_dim
if args.McDecoder:
dec_in *= group_num
self.dec_in = dec_in
self.decoder_linear = nn.ModuleList([
get_ffn([self.dec_in, 256, 512], with_bn=args.BatchNormType[3] == '1', drop_out=args.Dropout)
for _ in range(group_num if args.GroupWiseLayer[3] == '1' else 1)])
if args.ActivationType == 'None':
final_activation_ = []
elif args.ActivationType == 'Sigmoid':
final_activation_ = [nn.Sigmoid()]
elif args.ActivationType == 'Tanh':
final_activation_ = [nn.Tanh()]
else:
raise NotImplementedError('')
self.final_activation_ = final_activation_
self.decoder = nn.ModuleList([
get_ffn([512, 1024, 1024], with_bn=args.BatchNormType[4] == '1', drop_out=args.Dropout)
for _ in range(group_num if args.GroupWiseLayer[4] == '1' else 1)])
self.decoder_adaption = nn.ModuleList([
get_ffn([], last_layers=[nn.Linear(1024, in_dims[i])] + self.final_activation_)
for i in range(group_num if args.GroupWiseLayer[5] == '1' else 1)])
self.args = args
self.in_dims = in_dims
# def update_cluster_center(self, center):
# self.cluster_centers = F.normalize(torch.from_numpy(center), dim=1).cuda()
def forward(self, x, **kwargs):
return self.decode(self.encode([x]))
def encode(self, xs: list):
hs = []
for g, x in enumerate(xs):
if self.args.noise_type == 'None':
pass
elif self.args.noise_type == 'Drop':
x = x * (Variable(x.data.new(x.size()).normal_(0, 0.1)) < self.args.noise_weight).type_as(x)
elif self.args.noise_type == 'Add':
x = x + Variable(x.data.new(x.size()).normal_(0, self.args.noise_weight)).type_as(x)
else:
raise NotImplementedError('')
if len(x) != 0:
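                # A single-sample batch is temporarily duplicated (presumably so the BatchNorm
                # layers see a batch size larger than one); the duplicate is dropped again
                # right after the encoder.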
if len(x) == 1:
x = torch.concat([x, x])
# print(x.shape)
# x = x.view((len(x), -1))
# print(x.shape)
x = self.encoder_adaption[g if self.args.GroupWiseLayer[0] == '1' else 0](x)
x = self.encoder[g if self.args.GroupWiseLayer[1] == '1' else 0](x)
x = self.encoder_linear[g if self.args.GroupWiseLayer[2] == '1' else 0](x)
if len(x) == 1:
x = x[[0]]
if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']:
x = F.normalize(x, dim=1)
else:
x = torch.zeros([0, self.args.representation_dim], device=torch.device('cuda:0'))
hs.append(x)
return hs
def soft_ass(self, h, centroids):
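        # Soft assignment of representations to centroids: inner product when a normalizing
        # encoder activation is used, otherwise the negative halved Euclidean distance.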
if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']:
return h @ centroids.T
else:
dst = torch.cdist(h, centroids)
# return (torch.mean(dst) - dst) / (torch.amax(dst) - torch.amin(dst)) * 2
return -dst / 2
# def encode_class(self, hs):
# cs = []
# for h in hs:
# c = h @ self.cluster_centers.T
# cs.append(c)
# return cs
def decode(self, hs):
xs = []
for g, h in enumerate(hs):
if self.args.McDecoder:
h = torch.cat(hs, dim=1)
if len(h) != 0:
if len(h) == 1:
h = torch.concat([h, h])
h = self.decoder_linear[g if self.args.GroupWiseLayer[3] == '1' else 0](h)
h = self.decoder[g if self.args.GroupWiseLayer[4] == '1' else 0](h)
h = self.decoder_adaption[g if self.args.GroupWiseLayer[5] == '1' else 0](h)
if len(h) == 1:
h = h[[0]]
else:
h = torch.zeros([0, self.in_dims[g]], device=torch.device('cuda:0'))
xs.append(h)
return xs
def run(self, epochs, train_dataloader, test_dataloader, args):
# if args.loss_self_cons:
# clusters = get_clusters(args=args)
optimizer_g = torch.optim.Adam(
self.parameters(),
lr=args.LearnRate,
betas=(args.betas_a, args.betas_v),
weight_decay=args.WeightDecay
)
mse_loss = nn.MSELoss().cuda()
timer_all = TimeOperator.Timer()
timer_train = TimeOperator.Timer()
timer_save = TimeOperator.Timer()
ce_loss = nn.CrossEntropyLoss().cuda()
type_detail_shown = False
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# if args.gpu is None:
# checkpoint = torch.load(args.resume)
# else:
# # Map model to be loaded to specified single gpu.
# loc = 'cuda:{}'.format(args.gpu)
# checkpoint = torch.load(args.resume, map_location=loc)
start_epoch = checkpoint['epoch']
self.load_state_dict(checkpoint['state_dict'])
optimizer_g.load_state_dict(checkpoint['optimizer']['optimizer_g'])
# self.__dict__ = checkpoint['self_dic']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
# self.args = args
# warnings.warn('This is not equal to start from the beginning due to different rands states.')
#
else:
raise NotImplementedError("=> no checkpoint found at '{}'".format(args.resume))
if args.CodeTest:
args.train_epoch = start_epoch + 1
epochs = start_epoch + 1
best_acc = 0
for epoch in range(start_epoch, epochs):
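            # Learning-rate schedule: linear warm-up for the first args.LearnRateWarm epochs,
            # then constant, exponential ('Exp') or cosine ('Cosine') decay, selected by
            # args.LearnRateDecayType.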
if (epoch + 1) <= args.LearnRateWarm:
lr = args.LearnRate * (epoch + 1) / args.LearnRateWarm
else:
if args.LearnRateDecayType == 'None':
lr = args.LearnRate
elif args.LearnRateDecayType == 'Exp':
lr = args.LearnRate * ((1 + 10 * (epoch + 1 - args.LearnRateWarm) / (
args.train_epoch - args.LearnRateWarm)) ** -0.75)
elif args.LearnRateDecayType == 'Cosine':
lr = args.LearnRate * 0.5 * (1. + math.cos(
math.pi * (epoch + 1 - args.LearnRateWarm) / (args.train_epoch - args.LearnRateWarm)))
else:
raise NotImplementedError('args.LearnRateDecayType')
if lr != args.LearnRate:
def adjust_learning_rate(optimizer):
print('adjust_learning_rate: {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
adjust_learning_rate(optimizer_g)
timer_all_time = time.time()
# inf_t = time.time()
# print('start epoch {}'.format(epoch))
self.eval()
feature_vec, type_vec, group_vec = [], [], []
feature_vec_cluster = []
group_vec_cluster = []
feature_vec_classification = []
type_vec_cluster = []
data_vec = []
is_pair_all = []
timer_infer_data = TimeOperator.Timer()
rnmse_vec = [[], []] # mask = 0 1
with torch.no_grad():
inf_data_t = time.time()
for (fea0, fea1, class_labels0, class_labels1, mask, is_pair, index) in test_dataloader:
timer_infer_data.update(time.time() - inf_data_t)
# timer_infer_data.show(prefix='InferDataTime', total_count=len(test_dataloader),
# print_end_time=False)
fea0 = fea0.cuda()
fea1 = fea1.cuda()
if args.Rev:
h1, h0 = self.encode([fea0, fea1])
if args.SingleView != -1:
for v in range(len(mask[0])):
if v != 1 - args.SingleView:
mask[:, v] = 0
else:
h0, h1 = self.encode([fea0, fea1])
if args.SingleView != -1:
for v in range(len(mask[0])):
if v != args.SingleView:
mask[:, v] = 0
cluster_h0 = h0[mask[:, 0] == 1]
cluster_h1 = h1[mask[:, 1] == 1]
# if args.SingleView != -1:
# mask[:, args.SingleView] = 0
# # if args.SingleView == 0:
# # cluster_h1 = cluster_h1[[]]
# # class_labels1 = class_labels1[[]]
# # elif args.SingleView == 1:
# # class_labels0 = class_labels0[[]]
# # cluster_h0 = cluster_h0[[]]
# # else:
# # raise NotImplementedError('')
is_pair_all.extend(is_pair)
feature_vec_cluster.extend(torch.cat([cluster_h0, cluster_h1]).detach().cpu().numpy())
group_vec_cluster.extend(torch.concat((torch.zeros(len(cluster_h0), dtype=torch.int),
torch.ones(len(cluster_h1), dtype=torch.int))).numpy())
type_vec_cluster.extend(torch.concat((class_labels0[mask[:, 0] == 1],
class_labels1[mask[:, 1] == 1])).numpy())
feature_vec_classification.extend(torch.cat([h0, h1]).detach().cpu().numpy())
if (epoch + 1) == epochs or (epoch + 1) % args.VisualFreq == 0:
if torch.sum(torch.logical_not(torch.logical_or(mask[:, 1], mask[:, 0]))):
                            raise NotImplementedError('Found a pair in which both views are missing')
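                        # Impute the latent representation of missing views according to
                        # args.reFill (copy the other view, use cached cluster centers, or
                        # k-NN means), then re-align unpaired samples according to args.reAlign.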
if args.reFill == 'Copy':
if torch.sum(mask[:, 0] == 0):
h0[mask[:, 0] == 0] = h1[mask[:, 0] == 0]
if torch.sum(mask[:, 1] == 0):
h1[mask[:, 1] == 0] = h0[mask[:, 1] == 0]
elif args.reFill == 'Center':
# raise NotImplementedError('')
if self.pred_center_cac is None:
pass
warnings.warn('self.pred_center_cac == None')
else:
centors = torch.zeros((len(mask), 2, len(self.pred_center_cac[0]))).cuda()
centors[mask[:, 0] == 1, 0] = self.pred_center_cac[
self.pred_cac[:torch.sum(mask[:, 0] == 1)]]
centors[mask[:, 1] == 1, 1] = self.pred_center_cac[
self.pred_cac[torch.sum(mask[:, 0] == 1):]]
if torch.sum(mask[:, 0] == 0):
h0[mask[:, 0] == 0] = centors[mask[:, 0] == 0, 1]
if torch.sum(mask[:, 1] == 0):
h1[mask[:, 1] == 0] = centors[mask[:, 1] == 0, 0]
elif args.reFill == 'KnnMapMean':
if torch.sum(mask[:, 0] == 0):
nearest = get_nearest_k(h1[mask[:, 0] == 0], h1[is_pair], args.reAlignK)
h0p = h0[is_pair]
                                h0[mask[:, 0] == 0] = torch.cat([torch.mean(h0p[ns], dim=0) for ns in nearest])
if torch.sum(mask[:, 1] == 0):
nearest = get_nearest_k(h0[mask[:, 1] == 0], h0[is_pair], args.reAlignK)
h1p = h1[is_pair]
h1[mask[:, 1] == 0] = torch.cat([torch.mean(h1p[ns], dim=0) for ns in nearest])
# raise NotImplementedError('')
elif args.reFill == 'KnnMean':
                            # Correspondence re-alignment: keep xi1 unchanged and replace xi2 with the mean of the k view-2 points nearest to xi1
if torch.sum(mask[:, 1] == 0):
hs0 = h0[mask[:, 1] == 0]
he1 = h1[mask[:, 1] == 1]
nearest = get_nearest_k(hs0, he1, args.reAlignK)
# nearest = torch.argsort(torch.cdist(hs0.cpu(), he1.cpu()), dim=1)[:, :args.reAlignK]
h1[mask[:, 1] == 0] = torch.cat([torch.mean(he1[ns], dim=0) for ns in nearest])
# class_labels1[mask[:, 1] == 0] = class_labels1[mask[:, 1] == 1][nearest[:, 0]]
if torch.sum(mask[:, 0] == 0):
hs1 = h1[mask[:, 0] == 0]
he0 = h0[mask[:, 0] == 1]
nearest = get_nearest_k(hs1, he0, args.reAlignK)
# nearest = torch.argsort(torch.cdist(hs1.cpu(), he0.cpu()), dim=1)[:, :args.reAlignK]
h0[mask[:, 0] == 0] = torch.cat([torch.mean(he0[ns], dim=0) for ns in nearest])
# class_labels0[mask[:, 0] == 0] = class_labels0[mask[:, 0] == 1][nearest[:, 0]]
###############################################################
                            # Missing-view completion: xi2 = mean(the k view-2 points nearest to xi1)
# fill_num = k
# C = euclidean_dist(h0, h1)
# row_idx = C.argsort()
# col_idx = (C.t()).argsort()
# # Mij denotes the flag of i-th sample in view 0 and j-th sample in view 1
# M = torch.logical_and((mask[:, 0].repeat(test_num, 1)).t(), mask[:, 1].repeat(test_num, 1))
# for i in range(test_num):
# idx0 = col_idx[i, :][
# M[col_idx[i, :], i]] # idx for view 0 to sort and find the non-missing neighbors
# idx1 = row_idx[i, :][
# M[i, row_idx[i, :]]] # idx for view 1 to sort and find the non-missing neighbors
# if len(idx1) != 0 and len(idx0) == 0: # i-th sample in view 1 is missing
# avg_fill = h1[idx1[0:fill_num], :].sum(dim=0) / fill_num
# cnt += (class_labels1[idx1[0:fill_num]] == class_labels1[i]).sum()
# missing_cnt += 1
# recover_out0[i, :] = h0[i, :]
# recover_out1[i, :] = avg_fill # missing
# elif len(idx0) != 0 and len(idx1) == 0:
# avg_fill = h0[idx0[0:fill_num], :].sum(dim=0) / fill_num
# cnt += (class_labels0[idx0[0:fill_num]] == class_labels0[i]).sum()
# missing_cnt += 1
# recover_out0[i, :] = avg_fill # missing
# recover_out1[i, :] = h1[i, :]
# elif len(idx0) != 0 and len(idx1) != 0:
# recover_out0[i, :] = h0[i, :]
# recover_out1[i, :] = h1[i, :]
# else:
# raise Exception('error')
# if setting == 1:
# align_out0.extend((recover_out0.cpu()).numpy())
# align_out1.extend((recover_out1.cpu()).numpy())
# continue
#
else:
raise NotImplementedError('')
to_realign = torch.logical_and(is_pair == 0, torch.logical_and(mask[:, 1], mask[:, 0]))
if args.reAlign == 'KnnMean':
                            # Correspondence re-alignment: keep xi1 unchanged and replace xi2 with the mean of the k view-2 points nearest to xi1
if torch.sum(to_realign):
ha1 = h1[to_realign]
nearest = get_nearest_k(h0[to_realign], ha1, args.reAlignK)
# dist = torch.cdist(h0[to_realign].cpu(), ha1.cpu())
# nearest = torch.argsort(dist, dim=1)[:, :args.reAlignK]
h1[to_realign] = torch.cat([torch.mean(ha1[ns], dim=0) for ns in nearest])
# class_labels1[is_pair == 0] = class_labels1[is_pair == 0][nearest[:, 0]]
elif args.reAlign == 'Copy':
if torch.sum(to_realign):
h1[to_realign] = h0[to_realign]
# class_labels1[is_pair == 0] = class_labels0[is_pair == 0]
elif args.reAlign == 'KnnMapMean':
if torch.sum(to_realign):
targ_v1 = h1[is_pair]
nearest = get_nearest_k(h0[to_realign], h0[is_pair], args.reAlignK)
h1[to_realign] = torch.cat([torch.mean(targ_v1[ns], dim=0) for ns in nearest])
# class_labels1[is_pair == 0] = ...
elif args.reAlign == 'Ignore':
pass
else:
raise NotImplementedError('')
if args.Rev:
fea0_rec, fea1_rec = self.decode([h1, h0])
else:
fea0_rec, fea1_rec = self.decode([h0, h1])
# if len(fea0_rec[0]) == len(fea1_rec[0]):
# fea_rec = torch.concat([fea0_rec, fea1_rec])
# fea = torch.concat([fea0, fea1])
# mask_c = torch.concat([mask[:, 0], mask[:, 1]])
# if torch.sum(mask_c == 0):
# rnmse_vec[0].extend(
# evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 0], xs=fea[mask_c == 0]).cpu().numpy())
# if torch.sum(mask_c == 1):
# rnmse_vec[1].extend(
# evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 1], xs=fea[mask_c == 1]).cpu().numpy())
# else:
# if torch.sum(mask == 0):
# n0_v0 = evaluate.get_rnmse(
# xs_hat=fea0_rec[mask[:, 0] == 0], xs=fea0[mask[:, 0] == 0]).cpu().numpy()
# n0_v1 = evaluate.get_rnmse(
# xs_hat=fea1_rec[mask[:, 1] == 0], xs=fea1[mask[:, 1] == 0]).cpu().numpy()
# rnmse_vec[0].extend(n0_v0)
# rnmse_vec[0].extend(n0_v1)
# if torch.sum(mask == 1):
# n1_v0 = evaluate.get_rnmse(
# xs_hat=fea0_rec[mask[:, 0] == 1], xs=fea0[mask[:, 0] == 1]).cpu().numpy()
# n1_v1 = evaluate.get_rnmse(
# xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy()
# rnmse_vec[1].extend(n1_v0)
# rnmse_vec[1].extend(n1_v1)
g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int),
torch.ones(len(fea1), device=fea0.device, dtype=torch.int)))
h = torch.cat([h0, h1]).detach().cpu().numpy()
feature_vec.extend(h)
data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy())
group_vec.extend(g.cpu().numpy())
type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy())
inf_data_t = time.time()
feature_vec = np.array(feature_vec)
data_vec = np.array(data_vec)
feature_vec_cluster = np.array(feature_vec_cluster)
is_pair_all = np.array(is_pair_all)
feature_vec_classification = np.array(feature_vec_classification)
group_vec = np.array(group_vec)
group_vec_cluster = np.array(group_vec_cluster)
type_vec = np.array(type_vec)
type_vec_cluster = np.array(type_vec_cluster)
rnmse_vec[0] = np.array(rnmse_vec[0])
rnmse_vec[1] = np.array(rnmse_vec[1])
kmeans_time = TimeOperator.Timer()
if args.ShowReconstruct:
if args.dataset == 'MNISTUSPS':
dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets]
data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets]
Y = test_dataloader.dataset.datasets[0].targets
else:
dims = [d.shape[1] for d in test_dataloader.dataset.data]
data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data]
Y = test_dataloader.dataset.class_labels0
mask = test_dataloader.dataset.mask
n_per_cat = 10
rec0, rec1 = self.decode([
torch.from_numpy(feature_vec[group_vec == 0]).cuda(),
torch.from_numpy(feature_vec[group_vec == 1]).cuda()])
rec0 = rec0.detach().cpu().numpy()
rec1 = rec1.detach().cpu().numpy()
show_img = np.asarray([])
inds_map = np.asarray([])
for v in range(2):
col = np.asarray([])
inds_map_col = np.asarray([])
for y in range(10):
inds = np.arange(len(Y))[
np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y)
]
np.random.shuffle(inds)
assert len(inds) >= n_per_cat
inds = inds[:n_per_cat]
raw_imgs = data_list[v][inds]
missing_imgs = data_list[1 - v][inds]
rec_imgs = [rec0, rec1][v][inds]
rec_imgs_miss = [rec0, rec1][1 - v][inds]
pack = np.asarray(
[raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28])
if len(col):
col = np.concatenate([col, pack], axis=0)
else:
col = pack
if len(inds_map_col):
inds_map_col = np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0)
else:
inds_map_col = inds.reshape([1, -1])
if len(show_img):
show_img = np.concatenate([show_img, col], axis=1)
else:
show_img = col
if len(inds_map):
inds_map = np.concatenate([inds_map, inds_map_col], axis=1)
else:
inds_map = inds_map_col
| plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg')
| 7 | 2023-12-21 08:50:36+00:00 | 12k |
botcs/wolfson-scheduler | tests/test_solver.py | [
{
"identifier": "unravel_indices",
"path": "solver.py",
"snippet": "def unravel_indices(indices, shape):\n coord = []\n\n for dim in reversed(shape):\n coord.append(indices % dim)\n indices = indices // dim\n\n coord = torch.stack(coord[::-1], dim=-1)\n\n return coord"
},
{
"identifier": "generalized_outer_addition",
"path": "solver.py",
"snippet": "def generalized_outer_addition(vectors, output=None):\n \"\"\"\n Corrected function to compute the outer addition of N K-dimensional vectors using broadcasting.\n This function is equivalent to the following code:\n ```\n result = torch.zeros((K1, K2, ..., KN))\n for idx1 in range(K1):\n for idx2 in range(K2):\n ...\n result[idx1, idx2, ..., idxn] = vectors[idx1] + vectors[idx2] + ... + vectors[idxn]\n ```\n However, it is much faster because it uses pre-computed sums and sums of squares.\n\n :param vectors: List of N vectors of shape (K1, K2, ..., KN)\n :param output: Optional output tensor\n if provided, must be of shape (K1, K2, ..., KN)\n :return: Tensor of shape (K1, K2, ..., KN)\n \"\"\"\n\n # Assert all vectors are on the same device\n device = vectors[0].device\n assert all(\n v.device == device for v in vectors\n ), \"All vectors must be on the same device\"\n\n # Number of vectors (N) and dimensions (K)\n # N, K = vectors.shape\n N = len(vectors)\n Ks = [len(v) for v in vectors]\n if output is None:\n output = torch.zeros(Ks, dtype=vectors[0].dtype, device=vectors[0].device)\n else:\n assert output.shape == tuple(Ks), \"Output tensor has incorrect shape\"\n output.zero_()\n\n # Reshape each vector to have a unique non-singleton dimension\n for i in range(N):\n expanded_shape = [1] * N\n expanded_shape[i] = Ks[i]\n reshaped_vector = vectors[i].view(*expanded_shape)\n output += reshaped_vector\n\n return output"
},
{
"identifier": "compute_variances",
"path": "solver.py",
"snippet": "def compute_variances(X, Y):\n \"\"\"\n Compute variances between all combinations of vectors in X and Y.\n This function is equivalent to the following code:\n ```\n variances = torch.zeros((X.size(0), Y.size(0)))\n for i in range(X.size(0)):\n for j in range(Y.size(0)):\n concatenated = torch.cat((X[i], Y[j]))\n variances[i, j] = torch.var(concatenated, unbiased=False)\n ```\n However, it is much faster because it uses pre-computed sums and sums of squares.\n\n\n :param X: Tensor of shape (N, K)\n :param Y: Tensor of shape (M, L)\n \"\"\"\n\n # Compute sums and sums of squares for X\n sum_X = torch.sum(X, dim=1)\n sum_sq_X = torch.sum(X**2, dim=1)\n\n # Compute sums and sums of squares for Y\n sum_Y = torch.sum(Y, dim=1)\n sum_sq_Y = torch.sum(Y**2, dim=1)\n\n # Lengths of vectors in X and Y\n len_X = X.shape[1]\n len_Y = Y.shape[1]\n\n # Broadcasting sums and sum of squares for all combinations\n total_sum = sum_X.unsqueeze(1) + sum_Y.unsqueeze(0)\n total_sum_sq = sum_sq_X.unsqueeze(1) + sum_sq_Y.unsqueeze(0)\n total_len = len_X + len_Y\n\n # Compute variances\n mean = total_sum / total_len\n variances = total_sum_sq / total_len - mean**2\n\n return variances"
},
{
"identifier": "get_max_numel",
"path": "solver.py",
"snippet": "def get_max_numel(dtype, memory_capacity=None, device=\"cpu\"):\n \"\"\"\n Compute the maximum number of elements that fit in specified memory.\n\n :param dtype: Data type of the tensor (e.g., torch.float32)\n :param memory_capacity: Memory capacity in bytes\n :param device: 'cpu' or 'cuda'\n :return: maximum number of elements that fit\n \"\"\"\n\n # Get memory capacity\n if memory_capacity is None:\n memory_capacity = get_free_memory(device)\n\n # Calculate maximum number of elements that fit\n element_size = torch.tensor(\n [], dtype=dtype\n ).element_size() # Size in bytes of one element\n max_numel = memory_capacity // element_size\n\n return max_numel"
},
{
"identifier": "check_matrix_fit_and_num_chunks",
"path": "solver.py",
"snippet": "def check_matrix_fit_and_num_chunks(\n dimensions, dtype, memory_capacity=None, device=\"cpu\"\n):\n \"\"\"\n Check if a tensor of given dimensions and data type fits in specified memory.\n If not, return chunk sizes that maximize the capacity, slicing only along the first dimension.\n\n :param dimensions: Tuple of dimensions for the tensor\n :param dtype: Data type of the tensor (e.g., torch.float32)\n :param memory_capacity: Memory capacity in bytes\n :param device: 'cpu' or 'cuda'\n :return: number of chunks along the first dimension\n \"\"\"\n\n # Get memory capacity\n if memory_capacity is None:\n memory_capacity = get_memory_capacity(device)\n\n # Calculate total number of elements\n total_elements = 1\n for dim in dimensions:\n total_elements *= dim\n\n element_size = torch.tensor(\n [], dtype=dtype\n ).element_size() # Size in bytes of one element\n total_size = total_elements * element_size # Total memory required for the tensor\n\n if total_size <= memory_capacity:\n return 1\n\n # If doesn't fit, calculate chunk size for the first dimension\n other_dims_product = 1\n for dim in dimensions[1:]:\n other_dims_product *= dim\n\n max_first_dim_size = memory_capacity // (other_dims_product * element_size)\n if max_first_dim_size == 0:\n raise ValueError(\"Tensor does not fit in memory.\")\n\n num_chunks = math.ceil(dimensions[0] / max_first_dim_size)\n\n return num_chunks"
},
{
"identifier": "convert_property_to_categorical",
"path": "solver.py",
"snippet": "def convert_property_to_categorical(property):\n \"\"\"\n Convert the properties to a categorical variable.\n\n :param property: List of properties for each rower.\n Shape: (num_rowers)\n dtype: Any\n\n :return: Tensor of categorical properties.\n Shape: (num_rowers)\n dtype: torch.long\n \"\"\"\n\n unique_properties = set()\n for p in property:\n unique_properties.add(p)\n unique_properties = sorted(list(unique_properties))\n property = [unique_properties.index(p) for p in property]\n property = torch.tensor(property)\n return property"
},
{
"identifier": "extract_best_assignment",
"path": "solver.py",
"snippet": "def extract_best_assignment(assignments_per_week, total_score):\n \"\"\"\n Extract the best assignment for each outing.\n\n :param assignments_per_week: Tensor of assignments per week.\n shape: (num_outings, num_combinations, num_rowers)\n :param total_score: Tensor of total score for each assignment.\n shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n\n :return: Tensor of best assignment per outing.\n shape: (num_outings, 1, num_rowers)\n\n \"\"\"\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n # Find the top assignments\n # top_inds = torch.argsort(total_score.flatten(), descending=True)[0]\n top_idx = torch.argmax(total_score.flatten())\n\n top_idx = unravel_indices(top_idx, total_score.shape)\n\n # top_inds tells us for each outing the index of the top assignment\n top_assignment = torch.zeros(\n num_outings,\n 1,\n num_rowers,\n dtype=torch.uint8,\n device=assignments_per_week.device,\n )\n for outing_idx, comb_idx in enumerate(top_idx):\n top_assignment[outing_idx] = assignments_per_week[outing_idx, comb_idx]\n\n return top_assignment"
},
{
"identifier": "get_no_overlap_inds",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef get_no_overlap_inds(A, B):\n \"\"\"\n Perform matrix multiplication of A and B in chunks.\n Return the indices of rows in A and columns in B that have no overlap.\n Overlap is defined as a non-zero value in the product of A and B.\n\n :param A: First matrix\n shape: (num_combinations_A, num_rowers)\n :param B: Second matrix\n shape: (num_combinations_B, num_rowers)\n :param chunk_sizes: Chunk sizes for the first dimension of A\n :return: indices of rows in A and columns in B that have no overlap\n \"\"\"\n\n # check if the product of the two matrices fits in memory\n # if not, chunk the matrices and check for overlap in chunks\n num_chunks = check_matrix_fit_and_num_chunks(\n (A.shape[0], A.shape[1], B.shape[0]), dtype=A.dtype, device=A.device\n )\n\n # num_chunks = 1\n def multiply_and_find(A_chunk, B):\n # counts the number of double-assignments for each rower between the two boats\n assignment_count = torch.matmul(A_chunk, B.T)\n no_overlap_inds = torch.nonzero(assignment_count == 0)\n return no_overlap_inds\n\n # if the product fits in memory, check for overlap in one go\n if num_chunks == 1:\n return multiply_and_find(A, B)\n\n A_chunks = torch.chunk(A, num_chunks)\n\n # otherwise, chunk the matrices and check for overlap in chunks\n no_overlap_inds = []\n offset_idx = 0\n for A_chunk in tqdm.tqdm(A_chunks):\n # no_overlap_inds.append(multiply_and_find(A_chunk, B).tolist())\n chunk_inds = multiply_and_find(A_chunk, B)\n\n # add the chunk size to offset the indices\n chunk_inds[:, 0] += offset_idx\n offset_idx += A_chunk.shape[0]\n no_overlap_inds.append(chunk_inds)\n\n return torch.cat(no_overlap_inds)"
},
{
"identifier": "generate_binary_matrices",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef generate_binary_matrices(\n num_rowers,\n boat_sizes,\n device=\"cpu\",\n max_num_combinations=NUM_MAX_COMBINATION_PER_BOAT,\n):\n \"\"\"\n Generate binary matrices for each combination of rowers in boats.\n\n :param num_rowers: Total number of rowers\n :param boat_sizes: List of boat sizes\n \"\"\"\n per_boat_binary_matrices = []\n for boat_size in boat_sizes:\n # Precompute indices for combinations\n row_indices = []\n col_indices = []\n\n num_combinations = math.comb(num_rowers, boat_size)\n if num_combinations > max_num_combinations:\n M = torch.zeros((max_num_combinations, num_rowers), dtype=torch.bool)\n\n keep_indices = sample(\n torch.arange(num_combinations), k=max_num_combinations\n )\n keep_indices = keep_indices.sort().values\n i = 0\n for row, combination in enumerate(\n itertools.combinations(range(num_rowers), boat_size)\n ):\n if keep_indices[i] != row:\n continue\n for col in combination:\n row_indices.append(i)\n col_indices.append(col)\n i += 1\n if i == max_num_combinations:\n break\n\n else:\n M = torch.zeros((num_combinations, num_rowers), dtype=torch.bool)\n for row, combination in enumerate(\n itertools.combinations(range(num_rowers), boat_size)\n ):\n for col in combination:\n row_indices.append(row)\n col_indices.append(col)\n\n # Use advanced indexing to fill the matrix\n M[row_indices, col_indices] = 1\n per_boat_binary_matrices.append(M)\n return per_boat_binary_matrices"
},
{
"identifier": "eliminate_invalid_boats",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef eliminate_invalid_boats(\n binary_matrix, rower_sides, num_max_combinations=NUM_MAX_COMBINATION_PER_BOAT\n):\n \"\"\"\n Eliminate invalid boats from a binary matrix.\n\n Currently we consider a boat invalid if there are more rowers on one side than the other.\n We represent stroke as 1 and bow as -1 and 0 for no preference.\n\n :param binary_matrix: Binary matrix of rower combinations\n shape: (num_combinations, num_rowers)\n :return: Binary matrix with invalid boats eliminated\n \"\"\"\n\n # gather the rower sides for each rower in each boat for each combination\n num_assigned_rowers = binary_matrix[0].sum()\n # assert each row has the same number of assigned rowers\n assert (binary_matrix.sum(dim=1) == num_assigned_rowers).all()\n assert len(rower_sides) == binary_matrix.shape[1]\n idx = binary_matrix.nonzero()[:, 1].view(len(binary_matrix), num_assigned_rowers)\n outings = rower_sides[idx]\n\n # Compute the offset between the number of stroke and bow seats\n offset = torch.sum(outings, dim=1).abs()\n # Determine the number of rowers that are both stroke and bow seat\n count_where_both = torch.sum(outings == 0, dim=1)\n\n # Eliminate invalid boats\n is_valid = count_where_both >= offset\n binary_matrix = binary_matrix[is_valid]\n\n if len(binary_matrix) > num_max_combinations:\n binary_matrix = sample(binary_matrix, k=num_max_combinations)\n\n return binary_matrix"
},
{
"identifier": "generate_valid_assignments",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef generate_valid_assignments(\n single_boat_bin_matrices, num_max_combinations=NUM_MAX_PAIRWISE_COMBINATION\n):\n \"\"\"\n Generate valid combinations of rowers across multiple boats on a single outing\n\n :param matrices: List of binary matrices, each representing combinations for a boat.\n shape: List[\n Tensor(num_combinations_1, num_rowers),\n Tensor(num_combinations_2, num_rowers),\n ...\n Tensor(num_combinations_n, num_rowers),\n ]\n :return: Tensor of valid combinations across all boats.\n \"\"\"\n assert len(single_boat_bin_matrices) > 0, \"Must have at least one boat\"\n assert all(\n m.shape[1] == single_boat_bin_matrices[0].shape[1]\n for m in single_boat_bin_matrices\n ), \"All matrices must have the same number of rowers\"\n\n assignments = single_boat_bin_matrices[0]\n for boat_ind, boat_B in enumerate(single_boat_bin_matrices[1:], start=2):\n no_overlap_inds = get_no_overlap_inds(assignments, boat_B)\n\n if len(no_overlap_inds) > num_max_combinations:\n no_overlap_inds = sample(no_overlap_inds, k=num_max_combinations)\n\n A_inds, B_inds = no_overlap_inds.T\n\n # update boat_A to be the combination of boat_A and boat_B with no overlap\n assignments = assignments[A_inds] + boat_B[B_inds] * boat_ind\n return assignments"
},
{
"identifier": "evaluate_skill_variance",
"path": "solver.py",
"snippet": "def evaluate_skill_variance(assignments_per_week, skill_levels, dtype=torch.float16):\n \"\"\"\n This relies on the notion that the skill levels entered are not categorical\n but integer values (or can be mapped to ordered categories, e.g. M1 > M2 > M3 ... )\n\n :param assignments_per_week: Tensor of assignments per week.\n shape: (num_outings, num_combinations, num_rowers)\n\n :param skill_levels: Tensor of skill levels for each rower.\n shape: (num_rowers,)\n\n :return: Tensor of variance for each combination in each outing.\n shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n \"\"\"\n\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx][0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n max_num_boats = assignments_per_week.max().item()\n outing_variance = torch.zeros(\n num_outings, num_combinations, device=assignments_per_week.device, dtype=dtype\n )\n for boat_idx in range(max_num_boats):\n boat_assignment = assignments_per_week == boat_idx + 1\n # we use binary masking\n X = skill_levels * boat_assignment\n\n # but we need to make sure that we don't include the rowers that are not assigned\n X_sum = X.sum(dim=2)\n X_len = boat_assignment.sum(dim=2)\n X_mean = X_sum / X_len\n\n boat_variance = ((X - X_mean.unsqueeze_(2)) * boat_assignment) ** 2\n boat_variance = boat_variance.sum(dim=2)\n\n # we use the unbiased variance since the sample size is small\n boat_variance /= torch.clamp(X_len - 1, min=1)\n\n outing_variance += boat_variance\n\n # now we need to compute the variance between the outings across the week\n week_variance = generalized_outer_addition(outing_variance)\n return week_variance"
},
{
"identifier": "evaluate_num_preferred_outings",
"path": "solver.py",
"snippet": "def evaluate_num_preferred_outings(\n assignments_per_week, num_preferred_outings, dtype=torch.long\n):\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n assignments_per_week = assignments_per_week > 0\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n # just to pin memory and reuse the output tensor\n num_assignment_per_rower = torch.zeros(\n [num_combinations] * num_outings,\n device=assignments_per_week.device,\n dtype=dtype,\n )\n\n week_over_assignment = torch.zeros(\n [num_combinations] * num_outings,\n device=assignments_per_week.device,\n dtype=dtype,\n )\n\n for rower_idx in range(num_rowers):\n num_assignment_per_rower = generalized_outer_addition(\n assignments_per_week[:, :, rower_idx], output=num_assignment_per_rower\n )\n num_preferred_outings_per_rower = num_preferred_outings[rower_idx]\n assignment_diff = num_assignment_per_rower - num_preferred_outings_per_rower\n over_assignment = assignment_diff.clamp_(min=0)\n week_over_assignment += over_assignment\n\n return week_over_assignment"
},
{
"identifier": "evaluate_assignments_per_week",
"path": "solver.py",
"snippet": "def evaluate_assignments_per_week(\n assignments_per_week, properties, weights, return_stats=False\n):\n \"\"\"\n Evaluate the assignments per week.\n\n :param assignments_per_week: Tensor of num_outings different assignments for the week.\n Shape: (num_outings, num_combinations, num_rowers)\n dtype: torch.uint8\n :param properties: dict of Tensors of properties.\n Shape: {property_name: Tensor(num_rowers)}\n dtype: torch.long\n :param weights: dict of weights for each property.\n Shape: {property_name: float}\n :param return_stats: Whether to return the stats for each property.\n\n :return: Total score for the week.\n Shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n :return: Stats for each weight category.\n \"\"\"\n\n # Compute variance of skill levels\n week_variance = evaluate_skill_variance(\n assignments_per_week, properties[\"skill_level\"]\n )\n\n # Compute number of preferred outings\n week_num_preferred_outings = evaluate_num_preferred_outings(\n assignments_per_week, properties[\"num_preferred_outings\"]\n )\n\n # Compute total score\n total_score = (\n weights[\"skill variance\"] * week_variance\n + weights[\"over assignment\"] * week_num_preferred_outings\n )\n\n if return_stats:\n stats = {\n \"values\": {\n \"skill variance\": week_variance,\n \"over assignment\": week_num_preferred_outings,\n },\n \"weights\": weights,\n \"total\": total_score,\n }\n return total_score, stats\n\n return total_score"
},
{
"identifier": "permute_top_assignments",
"path": "solver.py",
"snippet": "def permute_top_assignments(\n valid_assignments,\n assignments_per_week,\n total_scores,\n num_permutations=10,\n randomize_permutations=True,\n):\n \"\"\"\n Permute the top assignments for the week.\n \"\"\"\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n assert len(valid_assignments) == num_outings, \"Must have the same number of outings\"\n assert (\n len(assignments_per_week) == num_outings\n ), \"Must have the same number of outings\"\n if any(m.ndim != 2 for m in valid_assignments):\n raise ValueError(\"All outing assignments have to be 2D for every outing\")\n if any(m.shape[1] != num_rowers for m in valid_assignments):\n raise ValueError(\n \"All outing assignments have to have the same number of rowers\"\n )\n if any((m.sum(dim=1) != m[0].sum()).any() for m in valid_assignments):\n raise ValueError(\n f\"In each combination of every outing,\\\n the number of rowers assigned must be the same.\"\n )\n\n # assert all(\n # m.ndim == 2\n # for m in valid_assignments\n # ), f\"All matrices must have the same number of dim: {[m.shape for m in valid_assignments]}\"\n # assert all(\n # m.shape[1] == num_rowers\n # for m in valid_assignments\n # ), \"All matrices must have the same number of rowers\"\n # for outing_idx in range(len(valid_assignments)):\n # assert (valid_assignments[outing_idx].sum() == valid_assignments[outing_idx][0].sum()).all(),\\\n # \"Combinations must have the same number of rowers assigned in an outing\"\n\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n best_assignment = extract_best_assignment(assignments_per_week, total_scores)\n\n # in the permutations we fix all outings except the outing we are permuting\n permuted_assignment = best_assignment.repeat(1, num_permutations + 1, 1)\n for outing_idx in range(len(assignments_per_week)):\n # just copy the best assignment num_permutations times\n if randomize_permutations:\n # we need to make sure that the best assignment is included\n permuted_assignment[outing_idx, 1:] = sample(\n valid_assignments[outing_idx], k=num_permutations\n )\n else:\n permuted_assignment[outing_idx, 1:] = valid_assignments[outing_idx][\n :num_permutations\n ]\n return permuted_assignment"
}
] | import torch
import unittest
import math
from unittest.mock import patch
from solver import (
unravel_indices,
generalized_outer_addition,
compute_variances,
get_max_numel,
check_matrix_fit_and_num_chunks,
convert_property_to_categorical,
extract_best_assignment,
get_no_overlap_inds,
generate_binary_matrices,
eliminate_invalid_boats,
generate_valid_assignments,
evaluate_skill_variance,
evaluate_num_preferred_outings,
evaluate_assignments_per_week,
permute_top_assignments,
) | 9,482 | A = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1 ,0], [0, 0, 0, 1]])
B = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1 ,0], [0, 0, 0, 1]])
C = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1 ,0], [0, 0, 0, 1]])
num_max_combinations = 2
result = generate_valid_assignments([A, B, C], num_max_combinations)
self.assertLessEqual(len(result), num_max_combinations)
def test_consistent_number_of_rowers(self):
matrix1 = torch.tensor([[1, 0, 0], [0, 1, 0]])
matrix2 = torch.tensor([[1, 0], [0, 1]])
with self.assertRaises(AssertionError):
generate_valid_assignments([matrix1, matrix2])
class TestEvaluateSkillVariance(unittest.TestCase):
def test_predefined_skills_and_assignments(self):
assignments = torch.tensor([[[1, 0, 1], [0, 1, 1]]]) # 1 outing, 2 combinations, 3 rowers
skills = torch.tensor([3, 5, 7]) # Skill levels
variance_1 = torch.var(torch.tensor([3., 7]))
variance_2 = torch.var(torch.tensor([5., 7]))
expected_result = torch.tensor([variance_1, variance_2], dtype=torch.float16)
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
self.assertTrue(torch.equal(result, expected_result))
def test_multiple_boats(self):
        assignments = torch.tensor([[[1, 2, 1, 2], [2, 1, 1, 2], [1, 1, 2, 2]]]) # 1 outing, 3 combinations, 4 rowers
skills = torch.tensor([3, 5, 7, 9]) # Skill levels
variance_37 = torch.var(torch.tensor([3., 7]))
variance_59 = torch.var(torch.tensor([5., 9]))
variance_39 = torch.var(torch.tensor([3., 9]))
variance_57 = torch.var(torch.tensor([5., 7]))
variance_35 = torch.var(torch.tensor([3., 5]))
variance_79 = torch.var(torch.tensor([7., 9]))
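        # For example, combination [1, 2, 1, 2] puts rowers 0 and 2 (skills 3 and 7)
        # in boat 1 and rowers 1 and 3 (skills 5 and 9) in boat 2, so its expected
        # score is var([3, 7]) + var([5, 9]); the other rows follow the same pattern.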
expected_result = torch.tensor([
variance_37 + variance_59,
variance_39 + variance_57,
variance_35 + variance_79
], dtype=torch.float16)
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
self.assertIsInstance(result, torch.Tensor)
self.assertTrue(torch.equal(result, expected_result))
def test_multiple_outings(self):
assignments = torch.tensor([
[[1, 0, 1], [0, 1, 1]], # Outing 1
[[1, 0, 1], [0, 1, 1]] # Outing 2
])
skills = torch.tensor([3, 5, 7])
variance_1 = torch.var(torch.tensor([3., 7]))
variance_2 = torch.var(torch.tensor([5., 7]))
expected_result = torch.tensor([
[2*variance_1, variance_2+variance_1],
[variance_1+variance_2, 2*variance_2]
], dtype=torch.float16)
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
self.assertTrue(torch.equal(result, expected_result))
def test_edge_case_no_rowers_assigned(self):
assignments = torch.tensor([[[0, 0, 0], [0, 0, 0]]]) # No rowers assigned
skills = torch.tensor([3, 5, 7])
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
# Expect zero variance as no rowers are assigned
self.assertTrue(torch.all(result == 0))
def test_edge_case_same_skill_level(self):
assignments = torch.tensor([[[1, 0, 1], [0, 1, 1]]])
skills = torch.tensor([5, 5, 5]) # All rowers have the same skill level
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
# Expect zero variance as all rowers have the same skill level
self.assertTrue(torch.all(result == 0))
class TestEvaluateNumPreferredOutings(unittest.TestCase):
def test_predefined_assignments_and_preferences(self):
assignments = torch.tensor([
[[1,0,0], [0,1,0], [0,0,1]], # Outing 1
[[1,0,0], [0,1,0], [0,0,1]], # Outing 2
])
preferred_outings = torch.tensor([0, 1, 2])
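        # Entry [i][j] pairs combination i of outing 1 with combination j of outing 2.
        # E.g. [0][0] assigns rower 0 in both outings although they prefer 0 outings
        # (2 excess assignments), while [1][1] assigns rower 1 twice against a
        # preference of 1 (1 excess assignment).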
expected_result = torch.tensor([
[2, 1, 1],
[1, 1, 0],
[1, 0, 0]
])
result = evaluate_num_preferred_outings(assignments, preferred_outings)
self.assertTrue(torch.equal(result, expected_result))
class TestPermuteTopAssignments(unittest.TestCase):
def test_permute_top_assignments(self):
# Small, handcrafted example
assignments_per_week = torch.tensor([
[[1, 0, 0], [0, 1, 0]], # Outing 1
[[0, 1, 1], [1, 0, 1]] # Outing 2
])
total_score = torch.tensor([
[0, 1],
[3, 2]
])
# this means that the best assignment has the
# index of [1, 0] in the score tensor
# that translates to the assignment of
# outing 1 is [0, 1, 0] (the 1st combination of the 1st outing)
# outing 2 is [0, 1, 1] (the 0th combination of the 2nd outing)
# The valid replacements are used for the permutation
# to generate alternatives to a single outing at a time
valid_assignments = [
torch.tensor([[2, 0, 0], [0, 2, 0], [0, 0, 2]]),
torch.tensor([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
]
        # Although the algorithm would never generate these assignments
        # (if two boats are available, both of them have to be used),
        # this scenario is kept purely for illustrative purposes.
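        # Given the permute_top_assignments snippet above, calling it with these inputs
        # is expected to return a tensor of shape (num_outings, num_permutations + 1,
        # num_rowers) = (2, 4, 3), whose slice at index 0 along dim 1 is the unpermuted
        # best assignment.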
num_permutations = 3
|
class TestUnravelIndices(unittest.TestCase):
def test_simple_case(self):
indices = torch.tensor([0, 1, 2, 3, 4, 5])
shape = (2, 3)
expected_result = torch.tensor([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_single_dimension(self):
indices = torch.tensor([0, 1, 2, 3])
shape = (4,)
expected_result = torch.tensor([[0], [1], [2], [3]])
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_multi_dimension(self):
indices = torch.tensor([0, 1, 5, 11])
shape = (2, 3, 2)
expected_result = torch.tensor([[0, 0, 0], [0, 0, 1], [0, 2, 1], [1, 2, 1]])
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_edge_cases(self):
indices = torch.tensor([0])
shape = (1, 1, 1)
expected_result = torch.tensor([[0, 0, 0]])
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_output_type_and_shape(self):
indices = torch.tensor([3, 7])
shape = (2, 4)
result = unravel_indices(indices, shape)
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(result.shape, (2, 2))
class TestGeneralizedOuterAddition(unittest.TestCase):
def test_correct_calculation(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4])]
expected_result = torch.tensor([[4, 5], [5, 6]])
result = generalized_outer_addition(vectors)
self.assertTrue(torch.equal(result, expected_result))
def test_different_vector_sizes(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4, 5])]
expected_result = torch.tensor([[4, 5, 6], [5, 6, 7]])
result = generalized_outer_addition(vectors)
self.assertTrue(torch.equal(result, expected_result))
def test_with_output_tensor(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4])]
output = torch.empty((2, 2))
expected_result = torch.tensor([[4, 5], [5, 6]])
result = generalized_outer_addition(vectors, output)
self.assertTrue(torch.equal(result, expected_result))
def test_error_with_incorrect_output_shape(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4])]
output = torch.empty((3, 3))
with self.assertRaises(AssertionError):
generalized_outer_addition(vectors, output)
def test_type_and_device_consistency(self):
vectors = [torch.tensor([1., 2.], device="cuda"), torch.tensor([3., 4.], device="cuda")]
result = generalized_outer_addition(vectors)
self.assertTrue(result.dtype == torch.float32)
self.assertTrue(result.device.type == "cuda")
class TestComputeVariances(unittest.TestCase):
def test_variances(self):
# Create sample data
torch.manual_seed(0) # For reproducibility
X = torch.rand(3, 7)
Y = torch.rand(4, 5)
# Expected variances computed by manual concatenation
expected_variances = torch.zeros((X.size(0), Y.size(0)))
for i in range(X.size(0)):
for j in range(Y.size(0)):
concatenated = torch.cat((X[i], Y[j]))
expected_variances[i, j] = torch.var(concatenated, unbiased=False)
# Variances computed by the function
actual_variances = compute_variances(X, Y)
# Assert equality (within a tolerance to account for floating-point errors)
self.assertTrue(torch.allclose(expected_variances, actual_variances, atol=1e-6))
class TestGetMaxNumel(unittest.TestCase):
@patch('solver.get_free_memory')
def test_with_different_dtypes(self, mock_get_free_memory):
mock_get_free_memory.return_value = 1024 # Mock 1024 bytes of free memory
dtypes = [torch.float32, torch.int32, torch.float64]
for dtype in dtypes:
element_size = torch.tensor([], dtype=dtype).element_size()
expected_result = 1024 // element_size
result = get_max_numel(dtype)
self.assertEqual(result, expected_result)
@patch('solver.get_free_memory')
def test_without_specified_memory_capacity(self, mock_get_free_memory):
mock_get_free_memory.return_value = 2048 # Mock 2048 bytes of free memory
dtype = torch.float32
element_size = torch.tensor([], dtype=dtype).element_size()
expected_result = 2048 // element_size
result = get_max_numel(dtype)
self.assertEqual(result, expected_result)
def test_with_specified_memory_capacity(self):
dtype = torch.float32
memory_capacity = 4096 # Specify 4096 bytes of memory
element_size = torch.tensor([], dtype=dtype).element_size()
expected_result = 4096 // element_size
result = get_max_numel(dtype, memory_capacity)
self.assertEqual(result, expected_result)
class TestCheckMatrixFitAndNumChunks(unittest.TestCase):
def test_tensor_fits_memory(self):
dimensions = (10, 10, 10)
dtype = torch.float32
memory_capacity = 40000 # Set a capacity that's more than enough
self.assertEqual(check_matrix_fit_and_num_chunks(dimensions, dtype, memory_capacity), 1)
def test_tensor_exceeds_memory(self):
dimensions = (100, 100, 100)
dtype = torch.float32
memory_capacity = 1000 # Set a capacity that's too small
self.assertRaises(ValueError, check_matrix_fit_and_num_chunks, dimensions, dtype, memory_capacity)
def test_different_data_types(self):
dimensions = (100, 100)
memory_capacity = 100000
for dtype in [torch.float32, torch.int32, torch.float64]:
self.assertIsInstance(check_matrix_fit_and_num_chunks(dimensions, dtype, memory_capacity), int)
def test_various_dimensions(self):
dtype = torch.float32
memory_capacity = 10000
test_dimensions = [
(100, 20, 5),
(50, 40, 30),
(200, 10, 10)
]
for dimensions in test_dimensions:
self.assertIsInstance(check_matrix_fit_and_num_chunks(dimensions, dtype, memory_capacity), int)
def test_without_specified_memory_capacity(self):
dimensions = (10, 10, 10)
dtype = torch.float32
self.assertIsInstance(check_matrix_fit_and_num_chunks(dimensions, dtype), int)
class TestConvertPropertyToCategorical(unittest.TestCase):
def test_correct_conversion(self):
property_list = ["red", "blue", "red"]
expected_result = torch.tensor([1, 0, 1])
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_empty_input(self):
property_list = []
expected_result = torch.tensor([])
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_mixed_values(self):
property_list = ["apple", "banana", "apple", "cherry"]
expected_result = torch.tensor([0, 1, 0, 2])
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_consistency_in_indexing(self):
property_list = ["dog", "cat", "bird", "cat"]
expected_result = torch.tensor([2, 1, 0, 1])
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_output_type_and_shape(self):
property_list = ["one", "two", "three"]
result = convert_property_to_categorical(property_list)
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(result.dtype, torch.int64)
self.assertEqual(result.shape, (3,))
class TestExtractBestAssignment(unittest.TestCase):
def test_valid_inputs(self):
# Mock data
assignments_per_week = torch.randint(0, 2, (3, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4, 4, 4) # Mock score tensor for 3 outings
# Expected output shape
expected_shape = (3, 1, 5)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertEqual(result.shape, expected_shape)
def test_edge_case_single_outing(self):
assignments_per_week = torch.randint(0, 2, (1, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4,)
expected_shape = (1, 1, 5)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertEqual(result.shape, expected_shape)
def test_output_type(self):
assignments_per_week = torch.randint(0, 2, (3, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4, 4, 4)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertIsInstance(result, torch.Tensor)
        self.assertEqual(result.dtype, torch.uint8)
def test_correctness_of_assignment_extraction(self):
# Mock data for 3 outings with 4 combinations each
assignments_per_week = torch.tensor([
[[0, 0], [0, 1], [1, 0], [1, 1]], # Outing 1
[[0, 0], [0, 1], [1, 0], [1, 1]], # Outing 2
[[0, 0], [0, 1], [1, 0], [1, 1]] # Outing 3
], dtype=torch.uint8)
# Mock total scores where the best scores are known
# Assuming the best scores are for the combinations [1, 0, 3] for outings [1, 2, 3]
total_score = torch.zeros((4, 4, 4))
total_score[1, 0, 3] = 1 # Highest score
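        # The argmax over the flattened scores unravels to (1, 0, 3): combination 1
        # for outing 1, combination 0 for outing 2 and combination 3 for outing 3,
        # which is exactly what the expected tensor below encodes.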
# Expected best assignments for each outing
expected_assignments = torch.tensor([
[[0, 1]], # Outing 1
[[0, 0]], # Outing 2
[[1, 1]] # Outing 3
], dtype=torch.uint8) # Add dimension to match the expected output shape
result = extract_best_assignment(assignments_per_week, total_score)
self.assertTrue(torch.equal(result, expected_assignments))
class TestGetNoOverlapInds(unittest.TestCase):
def test_no_overlap(self):
A = torch.tensor([[1, 0], [0, 1]])
B = torch.tensor([[0, 1], [1, 0]])
expected_result = torch.tensor([[0, 0], [1, 1]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_partial_overlap(self):
A = torch.tensor([[1, 1], [0, 1]])
B = torch.tensor([[1, 0], [0, 1]])
expected_result = torch.tensor([[1, 0]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_complete_overlap(self):
A = torch.tensor([[1, 1], [1, 1]])
B = torch.tensor([[1, 1], [1, 1]])
expected_result = torch.empty((0, 2), dtype=torch.int64)
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_different_sizes(self):
A = torch.tensor([[1, 1, 0, 0], [0, 1, 1, 0]])
B = torch.tensor([[1, 1, 0, 0], [0, 1, 1, 0], [1, 0, 0, 1]])
expected_result = torch.tensor([[1, 2]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
class TestGenerateBinaryMatrices(unittest.TestCase):
def test_correct_matrix_generation(self):
num_rowers = 4
boat_sizes = [2, 3]
expected_combinations = [math.comb(num_rowers, boat_size) for boat_size in boat_sizes]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for i, M in enumerate(result_matrices):
self.assertEqual(M.shape[0], expected_combinations[i]) # Correct number of combinations
self.assertEqual(M.shape[1], num_rowers) # Correct number of columns
self.assertTrue(torch.all((M.sum(axis=1) == boat_sizes[i]).logical_or(M.sum(axis=1) == 0))) # Correct boat sizes
def test_different_rower_and_boat_sizes(self):
num_rowers = 5
boat_sizes = [1, 4]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for M, boat_size in zip(result_matrices, boat_sizes):
self.assertEqual(M.shape, (math.comb(num_rowers, boat_size), num_rowers))
def test_output_type(self):
num_rowers = 3
boat_sizes = [2]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for M in result_matrices:
self.assertIsInstance(M, torch.Tensor)
            self.assertEqual(M.dtype, torch.bool)
class TestEliminateInvalidBoats(unittest.TestCase):
def test_no_elimination_of_valid_boats(self):
binary_matrix = torch.tensor([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
rower_sides = torch.tensor([1, -1, 0]) # Stroke, Bow, No preference
        expected_result = torch.tensor([[1, 0, 1], [1, 1, 0], [0, 1, 1]]) # All combinations are valid, so nothing is eliminated
result = eliminate_invalid_boats(binary_matrix, rower_sides)
self.assertTrue(torch.equal(result, expected_result))
def test_elimination_of_invalid_boats(self):
binary_matrix = torch.tensor([[1, 1, 0], [1, 0, 1]])
rower_sides = torch.tensor([1, 0, 1]) # Stroke, No preference, Stroke
# Eliminate [1, 0, 1] combination because of two stroke siders
expected_result = torch.tensor([[1, 1, 0]])
result = eliminate_invalid_boats(binary_matrix, rower_sides)
self.assertTrue(torch.equal(result, expected_result))
def test_combination_limit(self):
binary_matrix = torch.tensor([[1, 0, 1], [1, 0, 1], [0, 1, 1]])
        rower_sides = torch.tensor([1, -1, 0]) # Stroke, Bow, No preference
num_max_combinations = 2
result = eliminate_invalid_boats(binary_matrix, rower_sides, num_max_combinations)
self.assertLessEqual(len(result), num_max_combinations)
def test_output_type_and_shape(self):
binary_matrix = torch.tensor([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
rower_sides = torch.tensor([1, -1, 0])
result = eliminate_invalid_boats(binary_matrix, rower_sides)
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(result.dim(), 2)
class TestGenerateValidCombinations(unittest.TestCase):
def test_valid_combinations(self):
A = torch.tensor([[1, 1, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0]])
B = torch.tensor([[1, 1, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0]])
C = torch.tensor([[0, 0, 0, 0, 1, 1]])
result = generate_valid_assignments([A, B, C])
expected_result = torch.tensor([[2, 1, 1, 2, 3, 3]])
self.assertTrue(torch.equal(result, expected_result))
def test_combination_limit(self):
A = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1 ,0], [0, 0, 0, 1]])
B = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1 ,0], [0, 0, 0, 1]])
C = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1 ,0], [0, 0, 0, 1]])
num_max_combinations = 2
result = generate_valid_assignments([A, B, C], num_max_combinations)
self.assertLessEqual(len(result), num_max_combinations)
def test_consistent_number_of_rowers(self):
matrix1 = torch.tensor([[1, 0, 0], [0, 1, 0]])
matrix2 = torch.tensor([[1, 0], [0, 1]])
with self.assertRaises(AssertionError):
generate_valid_assignments([matrix1, matrix2])
class TestEvaluateSkillVariance(unittest.TestCase):
def test_predefined_skills_and_assignments(self):
assignments = torch.tensor([[[1, 0, 1], [0, 1, 1]]]) # 1 outing, 2 combinations, 3 rowers
skills = torch.tensor([3, 5, 7]) # Skill levels
variance_1 = torch.var(torch.tensor([3., 7]))
variance_2 = torch.var(torch.tensor([5., 7]))
expected_result = torch.tensor([variance_1, variance_2], dtype=torch.float16)
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
self.assertTrue(torch.equal(result, expected_result))
def test_multiple_boats(self):
        assignments = torch.tensor([[[1, 2, 1, 2], [2, 1, 1, 2], [1, 1, 2, 2]]]) # 1 outing, 3 combinations, 4 rowers
skills = torch.tensor([3, 5, 7, 9]) # Skill levels
variance_37 = torch.var(torch.tensor([3., 7]))
variance_59 = torch.var(torch.tensor([5., 9]))
variance_39 = torch.var(torch.tensor([3., 9]))
variance_57 = torch.var(torch.tensor([5., 7]))
variance_35 = torch.var(torch.tensor([3., 5]))
variance_79 = torch.var(torch.tensor([7., 9]))
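        # For example, combination [1, 2, 1, 2] puts rowers 0 and 2 (skills 3 and 7)
        # in boat 1 and rowers 1 and 3 (skills 5 and 9) in boat 2, so its expected
        # score is var([3, 7]) + var([5, 9]); the other rows follow the same pattern.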
expected_result = torch.tensor([
variance_37 + variance_59,
variance_39 + variance_57,
variance_35 + variance_79
], dtype=torch.float16)
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
self.assertIsInstance(result, torch.Tensor)
self.assertTrue(torch.equal(result, expected_result))
def test_multiple_outings(self):
assignments = torch.tensor([
[[1, 0, 1], [0, 1, 1]], # Outing 1
[[1, 0, 1], [0, 1, 1]] # Outing 2
])
skills = torch.tensor([3, 5, 7])
variance_1 = torch.var(torch.tensor([3., 7]))
variance_2 = torch.var(torch.tensor([5., 7]))
expected_result = torch.tensor([
[2*variance_1, variance_2+variance_1],
[variance_1+variance_2, 2*variance_2]
], dtype=torch.float16)
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
self.assertTrue(torch.equal(result, expected_result))
def test_edge_case_no_rowers_assigned(self):
assignments = torch.tensor([[[0, 0, 0], [0, 0, 0]]]) # No rowers assigned
skills = torch.tensor([3, 5, 7])
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
# Expect zero variance as no rowers are assigned
self.assertTrue(torch.all(result == 0))
def test_edge_case_same_skill_level(self):
assignments = torch.tensor([[[1, 0, 1], [0, 1, 1]]])
skills = torch.tensor([5, 5, 5]) # All rowers have the same skill level
result = evaluate_skill_variance(assignments, skills, dtype=torch.float16)
# Expect zero variance as all rowers have the same skill level
self.assertTrue(torch.all(result == 0))
class TestEvaluateNumPreferredOutings(unittest.TestCase):
def test_predefined_assignments_and_preferences(self):
assignments = torch.tensor([
[[1,0,0], [0,1,0], [0,0,1]], # Outing 1
[[1,0,0], [0,1,0], [0,0,1]], # Outing 2
])
preferred_outings = torch.tensor([0, 1, 2])
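        # Entry [i][j] pairs combination i of outing 1 with combination j of outing 2.
        # E.g. [0][0] assigns rower 0 in both outings although they prefer 0 outings
        # (2 excess assignments), while [1][1] assigns rower 1 twice against a
        # preference of 1 (1 excess assignment).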
expected_result = torch.tensor([
[2, 1, 1],
[1, 1, 0],
[1, 0, 0]
])
result = evaluate_num_preferred_outings(assignments, preferred_outings)
self.assertTrue(torch.equal(result, expected_result))
class TestPermuteTopAssignments(unittest.TestCase):
def test_permute_top_assignments(self):
# Small, handcrafted example
assignments_per_week = torch.tensor([
[[1, 0, 0], [0, 1, 0]], # Outing 1
[[0, 1, 1], [1, 0, 1]] # Outing 2
])
total_score = torch.tensor([
[0, 1],
[3, 2]
])
# this means that the best assignment has the
# index of [1, 0] in the score tensor
# that translates to the assignment of
# outing 1 is [0, 1, 0] (the 1st combination of the 1st outing)
# outing 2 is [0, 1, 1] (the 0th combination of the 2nd outing)
# The valid replacements are used for the permutation
# to generate alternatives to a single outing at a time
valid_assignments = [
torch.tensor([[2, 0, 0], [0, 2, 0], [0, 0, 2]]),
torch.tensor([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
]
        # Although the algorithm would never generate these assignments
        # (if two boats are available, both of them have to be used),
        # this scenario is kept purely for illustrative purposes.
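        # Given the permute_top_assignments snippet above, calling it with these inputs
        # is expected to return a tensor of shape (num_outings, num_permutations + 1,
        # num_rowers) = (2, 4, 3), whose slice at index 0 along dim 1 is the unpermuted
        # best assignment.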
num_permutations = 3
| result = permute_top_assignments( | 14 | 2023-12-18 05:12:36+00:00 | 12k |
Azure-Samples/functions-python-web-crawler | .venv/Lib/site-packages/charset_normalizer/cd.py | [
{
"identifier": "FREQUENCIES",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"u\",\n \"m\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"y\",\n \"b\",\n \"v\",\n \"k\",\n \"x\",\n \"j\",\n \"z\",\n \"q\",\n ],\n \"English—\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"m\",\n \"u\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"b\",\n \"y\",\n \"v\",\n \"k\",\n \"j\",\n \"x\",\n \"z\",\n \"q\",\n ],\n \"German\": [\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"s\",\n \"t\",\n \"a\",\n \"d\",\n \"h\",\n \"u\",\n \"l\",\n \"g\",\n \"o\",\n \"c\",\n \"m\",\n \"b\",\n \"f\",\n \"k\",\n \"w\",\n \"z\",\n \"p\",\n \"v\",\n \"ü\",\n \"ä\",\n \"ö\",\n \"j\",\n ],\n \"French\": [\n \"e\",\n \"a\",\n \"s\",\n \"n\",\n \"i\",\n \"t\",\n \"r\",\n \"l\",\n \"u\",\n \"o\",\n \"d\",\n \"c\",\n \"p\",\n \"m\",\n \"é\",\n \"v\",\n \"g\",\n \"f\",\n \"b\",\n \"h\",\n \"q\",\n \"à\",\n \"x\",\n \"è\",\n \"y\",\n \"j\",\n ],\n \"Dutch\": [\n \"e\",\n \"n\",\n \"a\",\n \"i\",\n \"r\",\n \"t\",\n \"o\",\n \"d\",\n \"s\",\n \"l\",\n \"g\",\n \"h\",\n \"v\",\n \"m\",\n \"u\",\n \"k\",\n \"c\",\n \"p\",\n \"b\",\n \"w\",\n \"j\",\n \"z\",\n \"f\",\n \"y\",\n \"x\",\n \"ë\",\n ],\n \"Italian\": [\n \"e\",\n \"i\",\n \"a\",\n \"o\",\n \"n\",\n \"l\",\n \"t\",\n \"r\",\n \"s\",\n \"c\",\n \"d\",\n \"u\",\n \"p\",\n \"m\",\n \"g\",\n \"v\",\n \"f\",\n \"b\",\n \"z\",\n \"h\",\n \"q\",\n \"è\",\n \"à\",\n \"k\",\n \"y\",\n \"ò\",\n ],\n \"Polish\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"z\",\n \"w\",\n \"s\",\n \"c\",\n \"t\",\n \"k\",\n \"y\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"l\",\n \"j\",\n \"ł\",\n \"g\",\n \"b\",\n \"h\",\n \"ą\",\n \"ę\",\n \"ó\",\n ],\n \"Spanish\": [\n \"e\",\n \"a\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"i\",\n \"l\",\n \"d\",\n \"t\",\n \"c\",\n \"u\",\n \"m\",\n \"p\",\n \"b\",\n \"g\",\n \"v\",\n \"f\",\n \"y\",\n \"ó\",\n \"h\",\n \"q\",\n \"í\",\n \"j\",\n \"z\",\n \"á\",\n ],\n \"Russian\": [\n \"о\",\n \"а\",\n \"е\",\n \"и\",\n \"н\",\n \"с\",\n \"т\",\n \"р\",\n \"в\",\n \"л\",\n \"к\",\n \"м\",\n \"д\",\n \"п\",\n \"у\",\n \"г\",\n \"я\",\n \"ы\",\n \"з\",\n \"б\",\n \"й\",\n \"ь\",\n \"ч\",\n \"х\",\n \"ж\",\n \"ц\",\n ],\n # Jap-Kanji\n \"Japanese\": [\n \"人\",\n \"一\",\n \"大\",\n \"亅\",\n \"丁\",\n \"丨\",\n \"竹\",\n \"笑\",\n \"口\",\n \"日\",\n \"今\",\n \"二\",\n \"彳\",\n \"行\",\n \"十\",\n \"土\",\n \"丶\",\n \"寸\",\n \"寺\",\n \"時\",\n \"乙\",\n \"丿\",\n \"乂\",\n \"气\",\n \"気\",\n \"冂\",\n \"巾\",\n \"亠\",\n \"市\",\n \"目\",\n \"儿\",\n \"見\",\n \"八\",\n \"小\",\n \"凵\",\n \"県\",\n \"月\",\n \"彐\",\n \"門\",\n \"間\",\n \"木\",\n \"東\",\n \"山\",\n \"出\",\n \"本\",\n \"中\",\n \"刀\",\n \"分\",\n \"耳\",\n \"又\",\n \"取\",\n \"最\",\n \"言\",\n \"田\",\n \"心\",\n \"思\",\n \"刂\",\n \"前\",\n \"京\",\n \"尹\",\n \"事\",\n \"生\",\n \"厶\",\n \"云\",\n \"会\",\n \"未\",\n \"来\",\n \"白\",\n \"冫\",\n \"楽\",\n \"灬\",\n \"馬\",\n \"尸\",\n \"尺\",\n \"駅\",\n \"明\",\n \"耂\",\n \"者\",\n \"了\",\n \"阝\",\n \"都\",\n \"高\",\n \"卜\",\n \"占\",\n \"厂\",\n \"广\",\n \"店\",\n \"子\",\n \"申\",\n \"奄\",\n \"亻\",\n \"俺\",\n \"上\",\n \"方\",\n \"冖\",\n \"学\",\n \"衣\",\n \"艮\",\n \"食\",\n \"自\",\n ],\n # Jap-Katakana\n \"Japanese—\": [\n \"ー\",\n \"ン\",\n \"ス\",\n \"・\",\n \"ル\",\n \"ト\",\n \"リ\",\n \"イ\",\n \"ア\",\n \"ラ\",\n \"ッ\",\n \"ク\",\n \"ド\",\n \"シ\",\n \"レ\",\n \"ジ\",\n \"タ\",\n \"フ\",\n \"ロ\",\n \"カ\",\n \"テ\",\n \"マ\",\n \"ィ\",\n \"グ\",\n \"バ\",\n 
\"ム\",\n \"プ\",\n \"オ\",\n \"コ\",\n \"デ\",\n \"ニ\",\n \"ウ\",\n \"メ\",\n \"サ\",\n \"ビ\",\n \"ナ\",\n \"ブ\",\n \"ャ\",\n \"エ\",\n \"ュ\",\n \"チ\",\n \"キ\",\n \"ズ\",\n \"ダ\",\n \"パ\",\n \"ミ\",\n \"ェ\",\n \"ョ\",\n \"ハ\",\n \"セ\",\n \"ベ\",\n \"ガ\",\n \"モ\",\n \"ツ\",\n \"ネ\",\n \"ボ\",\n \"ソ\",\n \"ノ\",\n \"ァ\",\n \"ヴ\",\n \"ワ\",\n \"ポ\",\n \"ペ\",\n \"ピ\",\n \"ケ\",\n \"ゴ\",\n \"ギ\",\n \"ザ\",\n \"ホ\",\n \"ゲ\",\n \"ォ\",\n \"ヤ\",\n \"ヒ\",\n \"ユ\",\n \"ヨ\",\n \"ヘ\",\n \"ゼ\",\n \"ヌ\",\n \"ゥ\",\n \"ゾ\",\n \"ヶ\",\n \"ヂ\",\n \"ヲ\",\n \"ヅ\",\n \"ヵ\",\n \"ヱ\",\n \"ヰ\",\n \"ヮ\",\n \"ヽ\",\n \"゠\",\n \"ヾ\",\n \"ヷ\",\n \"ヿ\",\n \"ヸ\",\n \"ヹ\",\n \"ヺ\",\n ],\n # Jap-Hiragana\n \"Japanese——\": [\n \"の\",\n \"に\",\n \"る\",\n \"た\",\n \"と\",\n \"は\",\n \"し\",\n \"い\",\n \"を\",\n \"で\",\n \"て\",\n \"が\",\n \"な\",\n \"れ\",\n \"か\",\n \"ら\",\n \"さ\",\n \"っ\",\n \"り\",\n \"す\",\n \"あ\",\n \"も\",\n \"こ\",\n \"ま\",\n \"う\",\n \"く\",\n \"よ\",\n \"き\",\n \"ん\",\n \"め\",\n \"お\",\n \"け\",\n \"そ\",\n \"つ\",\n \"だ\",\n \"や\",\n \"え\",\n \"ど\",\n \"わ\",\n \"ち\",\n \"み\",\n \"せ\",\n \"じ\",\n \"ば\",\n \"へ\",\n \"び\",\n \"ず\",\n \"ろ\",\n \"ほ\",\n \"げ\",\n \"む\",\n \"べ\",\n \"ひ\",\n \"ょ\",\n \"ゆ\",\n \"ぶ\",\n \"ご\",\n \"ゃ\",\n \"ね\",\n \"ふ\",\n \"ぐ\",\n \"ぎ\",\n \"ぼ\",\n \"ゅ\",\n \"づ\",\n \"ざ\",\n \"ぞ\",\n \"ぬ\",\n \"ぜ\",\n \"ぱ\",\n \"ぽ\",\n \"ぷ\",\n \"ぴ\",\n \"ぃ\",\n \"ぁ\",\n \"ぇ\",\n \"ぺ\",\n \"ゞ\",\n \"ぢ\",\n \"ぉ\",\n \"ぅ\",\n \"ゐ\",\n \"ゝ\",\n \"ゑ\",\n \"゛\",\n \"゜\",\n \"ゎ\",\n \"ゔ\",\n \"゚\",\n \"ゟ\",\n \"゙\",\n \"ゕ\",\n \"ゖ\",\n ],\n \"Portuguese\": [\n \"a\",\n \"e\",\n \"o\",\n \"s\",\n \"i\",\n \"r\",\n \"d\",\n \"n\",\n \"t\",\n \"m\",\n \"u\",\n \"c\",\n \"l\",\n \"p\",\n \"g\",\n \"v\",\n \"b\",\n \"f\",\n \"h\",\n \"ã\",\n \"q\",\n \"é\",\n \"ç\",\n \"á\",\n \"z\",\n \"í\",\n ],\n \"Swedish\": [\n \"e\",\n \"a\",\n \"n\",\n \"r\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"d\",\n \"o\",\n \"m\",\n \"k\",\n \"g\",\n \"v\",\n \"h\",\n \"f\",\n \"u\",\n \"p\",\n \"ä\",\n \"c\",\n \"b\",\n \"ö\",\n \"å\",\n \"y\",\n \"j\",\n \"x\",\n ],\n \"Chinese\": [\n \"的\",\n \"一\",\n \"是\",\n \"不\",\n \"了\",\n \"在\",\n \"人\",\n \"有\",\n \"我\",\n \"他\",\n \"这\",\n \"个\",\n \"们\",\n \"中\",\n \"来\",\n \"上\",\n \"大\",\n \"为\",\n \"和\",\n \"国\",\n \"地\",\n \"到\",\n \"以\",\n \"说\",\n \"时\",\n \"要\",\n \"就\",\n \"出\",\n \"会\",\n \"可\",\n \"也\",\n \"你\",\n \"对\",\n \"生\",\n \"能\",\n \"而\",\n \"子\",\n \"那\",\n \"得\",\n \"于\",\n \"着\",\n \"下\",\n \"自\",\n \"之\",\n \"年\",\n \"过\",\n \"发\",\n \"后\",\n \"作\",\n \"里\",\n \"用\",\n \"道\",\n \"行\",\n \"所\",\n \"然\",\n \"家\",\n \"种\",\n \"事\",\n \"成\",\n \"方\",\n \"多\",\n \"经\",\n \"么\",\n \"去\",\n \"法\",\n \"学\",\n \"如\",\n \"都\",\n \"同\",\n \"现\",\n \"当\",\n \"没\",\n \"动\",\n \"面\",\n \"起\",\n \"看\",\n \"定\",\n \"天\",\n \"分\",\n \"还\",\n \"进\",\n \"好\",\n \"小\",\n \"部\",\n \"其\",\n \"些\",\n \"主\",\n \"样\",\n \"理\",\n \"心\",\n \"她\",\n \"本\",\n \"前\",\n \"开\",\n \"但\",\n \"因\",\n \"只\",\n \"从\",\n \"想\",\n \"实\",\n ],\n \"Ukrainian\": [\n \"о\",\n \"а\",\n \"н\",\n \"і\",\n \"и\",\n \"р\",\n \"в\",\n \"т\",\n \"е\",\n \"с\",\n \"к\",\n \"л\",\n \"у\",\n \"д\",\n \"м\",\n \"п\",\n \"з\",\n \"я\",\n \"ь\",\n \"б\",\n \"г\",\n \"й\",\n \"ч\",\n \"х\",\n \"ц\",\n \"ї\",\n ],\n \"Norwegian\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"s\",\n \"i\",\n \"o\",\n \"l\",\n \"d\",\n \"g\",\n \"k\",\n \"m\",\n \"v\",\n \"f\",\n \"p\",\n \"u\",\n \"b\",\n \"h\",\n \"å\",\n \"y\",\n \"j\",\n \"ø\",\n \"c\",\n \"æ\",\n \"w\",\n ],\n \"Finnish\": [\n \"a\",\n \"i\",\n \"n\",\n \"t\",\n \"e\",\n \"s\",\n 
\"l\",\n \"o\",\n \"u\",\n \"k\",\n \"ä\",\n \"m\",\n \"r\",\n \"v\",\n \"j\",\n \"h\",\n \"p\",\n \"y\",\n \"d\",\n \"ö\",\n \"g\",\n \"c\",\n \"b\",\n \"f\",\n \"w\",\n \"z\",\n ],\n \"Vietnamese\": [\n \"n\",\n \"h\",\n \"t\",\n \"i\",\n \"c\",\n \"g\",\n \"a\",\n \"o\",\n \"u\",\n \"m\",\n \"l\",\n \"r\",\n \"à\",\n \"đ\",\n \"s\",\n \"e\",\n \"v\",\n \"p\",\n \"b\",\n \"y\",\n \"ư\",\n \"d\",\n \"á\",\n \"k\",\n \"ộ\",\n \"ế\",\n ],\n \"Czech\": [\n \"o\",\n \"e\",\n \"a\",\n \"n\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"v\",\n \"r\",\n \"k\",\n \"d\",\n \"u\",\n \"m\",\n \"p\",\n \"í\",\n \"c\",\n \"h\",\n \"z\",\n \"á\",\n \"y\",\n \"j\",\n \"b\",\n \"ě\",\n \"é\",\n \"ř\",\n ],\n \"Hungarian\": [\n \"e\",\n \"a\",\n \"t\",\n \"l\",\n \"s\",\n \"n\",\n \"k\",\n \"r\",\n \"i\",\n \"o\",\n \"z\",\n \"á\",\n \"é\",\n \"g\",\n \"m\",\n \"b\",\n \"y\",\n \"v\",\n \"d\",\n \"h\",\n \"u\",\n \"p\",\n \"j\",\n \"ö\",\n \"f\",\n \"c\",\n ],\n \"Korean\": [\n \"이\",\n \"다\",\n \"에\",\n \"의\",\n \"는\",\n \"로\",\n \"하\",\n \"을\",\n \"가\",\n \"고\",\n \"지\",\n \"서\",\n \"한\",\n \"은\",\n \"기\",\n \"으\",\n \"년\",\n \"대\",\n \"사\",\n \"시\",\n \"를\",\n \"리\",\n \"도\",\n \"인\",\n \"스\",\n \"일\",\n ],\n \"Indonesian\": [\n \"a\",\n \"n\",\n \"e\",\n \"i\",\n \"r\",\n \"t\",\n \"u\",\n \"s\",\n \"d\",\n \"k\",\n \"m\",\n \"l\",\n \"g\",\n \"p\",\n \"b\",\n \"o\",\n \"h\",\n \"y\",\n \"j\",\n \"c\",\n \"w\",\n \"f\",\n \"v\",\n \"z\",\n \"x\",\n \"q\",\n ],\n \"Turkish\": [\n \"a\",\n \"e\",\n \"i\",\n \"n\",\n \"r\",\n \"l\",\n \"ı\",\n \"k\",\n \"d\",\n \"t\",\n \"s\",\n \"m\",\n \"y\",\n \"u\",\n \"o\",\n \"b\",\n \"ü\",\n \"ş\",\n \"v\",\n \"g\",\n \"z\",\n \"h\",\n \"c\",\n \"p\",\n \"ç\",\n \"ğ\",\n ],\n \"Romanian\": [\n \"e\",\n \"i\",\n \"a\",\n \"r\",\n \"n\",\n \"t\",\n \"u\",\n \"l\",\n \"o\",\n \"c\",\n \"s\",\n \"d\",\n \"p\",\n \"m\",\n \"ă\",\n \"f\",\n \"v\",\n \"î\",\n \"g\",\n \"b\",\n \"ș\",\n \"ț\",\n \"z\",\n \"h\",\n \"â\",\n \"j\",\n ],\n \"Farsi\": [\n \"ا\",\n \"ی\",\n \"ر\",\n \"د\",\n \"ن\",\n \"ه\",\n \"و\",\n \"م\",\n \"ت\",\n \"ب\",\n \"س\",\n \"ل\",\n \"ک\",\n \"ش\",\n \"ز\",\n \"ف\",\n \"گ\",\n \"ع\",\n \"خ\",\n \"ق\",\n \"ج\",\n \"آ\",\n \"پ\",\n \"ح\",\n \"ط\",\n \"ص\",\n ],\n \"Arabic\": [\n \"ا\",\n \"ل\",\n \"ي\",\n \"م\",\n \"و\",\n \"ن\",\n \"ر\",\n \"ت\",\n \"ب\",\n \"ة\",\n \"ع\",\n \"د\",\n \"س\",\n \"ف\",\n \"ه\",\n \"ك\",\n \"ق\",\n \"أ\",\n \"ح\",\n \"ج\",\n \"ش\",\n \"ط\",\n \"ص\",\n \"ى\",\n \"خ\",\n \"إ\",\n ],\n \"Danish\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"i\",\n \"s\",\n \"d\",\n \"l\",\n \"o\",\n \"g\",\n \"m\",\n \"k\",\n \"f\",\n \"v\",\n \"u\",\n \"b\",\n \"h\",\n \"p\",\n \"å\",\n \"y\",\n \"ø\",\n \"æ\",\n \"c\",\n \"j\",\n \"w\",\n ],\n \"Serbian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"р\",\n \"с\",\n \"у\",\n \"т\",\n \"к\",\n \"ј\",\n \"в\",\n \"д\",\n \"м\",\n \"п\",\n \"л\",\n \"г\",\n \"з\",\n \"б\",\n \"a\",\n \"i\",\n \"e\",\n \"o\",\n \"n\",\n \"ц\",\n \"ш\",\n ],\n \"Lithuanian\": [\n \"i\",\n \"a\",\n \"s\",\n \"o\",\n \"r\",\n \"e\",\n \"t\",\n \"n\",\n \"u\",\n \"k\",\n \"m\",\n \"l\",\n \"p\",\n \"v\",\n \"d\",\n \"j\",\n \"g\",\n \"ė\",\n \"b\",\n \"y\",\n \"ų\",\n \"š\",\n \"ž\",\n \"c\",\n \"ą\",\n \"į\",\n ],\n \"Slovene\": [\n \"e\",\n \"a\",\n \"i\",\n \"o\",\n \"n\",\n \"r\",\n \"s\",\n \"l\",\n \"t\",\n \"j\",\n \"v\",\n \"k\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"z\",\n \"b\",\n \"g\",\n \"h\",\n \"č\",\n \"c\",\n \"š\",\n \"ž\",\n \"f\",\n \"y\",\n ],\n \"Slovak\": [\n \"o\",\n \"a\",\n 
\"e\",\n \"n\",\n \"i\",\n \"r\",\n \"v\",\n \"t\",\n \"s\",\n \"l\",\n \"k\",\n \"d\",\n \"m\",\n \"p\",\n \"u\",\n \"c\",\n \"h\",\n \"j\",\n \"b\",\n \"z\",\n \"á\",\n \"y\",\n \"ý\",\n \"í\",\n \"č\",\n \"é\",\n ],\n \"Hebrew\": [\n \"י\",\n \"ו\",\n \"ה\",\n \"ל\",\n \"ר\",\n \"ב\",\n \"ת\",\n \"מ\",\n \"א\",\n \"ש\",\n \"נ\",\n \"ע\",\n \"ם\",\n \"ד\",\n \"ק\",\n \"ח\",\n \"פ\",\n \"ס\",\n \"כ\",\n \"ג\",\n \"ט\",\n \"צ\",\n \"ן\",\n \"ז\",\n \"ך\",\n ],\n \"Bulgarian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"с\",\n \"в\",\n \"л\",\n \"к\",\n \"д\",\n \"п\",\n \"м\",\n \"з\",\n \"г\",\n \"я\",\n \"ъ\",\n \"у\",\n \"б\",\n \"ч\",\n \"ц\",\n \"й\",\n \"ж\",\n \"щ\",\n \"х\",\n ],\n \"Croatian\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"j\",\n \"s\",\n \"t\",\n \"u\",\n \"k\",\n \"l\",\n \"v\",\n \"d\",\n \"m\",\n \"p\",\n \"g\",\n \"z\",\n \"b\",\n \"c\",\n \"č\",\n \"h\",\n \"š\",\n \"ž\",\n \"ć\",\n \"f\",\n ],\n \"Hindi\": [\n \"क\",\n \"र\",\n \"स\",\n \"न\",\n \"त\",\n \"म\",\n \"ह\",\n \"प\",\n \"य\",\n \"ल\",\n \"व\",\n \"ज\",\n \"द\",\n \"ग\",\n \"ब\",\n \"श\",\n \"ट\",\n \"अ\",\n \"ए\",\n \"थ\",\n \"भ\",\n \"ड\",\n \"च\",\n \"ध\",\n \"ष\",\n \"इ\",\n ],\n \"Estonian\": [\n \"a\",\n \"i\",\n \"e\",\n \"s\",\n \"t\",\n \"l\",\n \"u\",\n \"n\",\n \"o\",\n \"k\",\n \"r\",\n \"d\",\n \"m\",\n \"v\",\n \"g\",\n \"p\",\n \"j\",\n \"h\",\n \"ä\",\n \"b\",\n \"õ\",\n \"ü\",\n \"f\",\n \"c\",\n \"ö\",\n \"y\",\n ],\n \"Thai\": [\n \"า\",\n \"น\",\n \"ร\",\n \"อ\",\n \"ก\",\n \"เ\",\n \"ง\",\n \"ม\",\n \"ย\",\n \"ล\",\n \"ว\",\n \"ด\",\n \"ท\",\n \"ส\",\n \"ต\",\n \"ะ\",\n \"ป\",\n \"บ\",\n \"ค\",\n \"ห\",\n \"แ\",\n \"จ\",\n \"พ\",\n \"ช\",\n \"ข\",\n \"ใ\",\n ],\n \"Greek\": [\n \"α\",\n \"τ\",\n \"ο\",\n \"ι\",\n \"ε\",\n \"ν\",\n \"ρ\",\n \"σ\",\n \"κ\",\n \"η\",\n \"π\",\n \"ς\",\n \"υ\",\n \"μ\",\n \"λ\",\n \"ί\",\n \"ό\",\n \"ά\",\n \"γ\",\n \"έ\",\n \"δ\",\n \"ή\",\n \"ω\",\n \"χ\",\n \"θ\",\n \"ύ\",\n ],\n \"Tamil\": [\n \"க\",\n \"த\",\n \"ப\",\n \"ட\",\n \"ர\",\n \"ம\",\n \"ல\",\n \"ன\",\n \"வ\",\n \"ற\",\n \"ய\",\n \"ள\",\n \"ச\",\n \"ந\",\n \"இ\",\n \"ண\",\n \"அ\",\n \"ஆ\",\n \"ழ\",\n \"ங\",\n \"எ\",\n \"உ\",\n \"ஒ\",\n \"ஸ\",\n ],\n \"Kazakh\": [\n \"а\",\n \"ы\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"л\",\n \"і\",\n \"д\",\n \"с\",\n \"м\",\n \"қ\",\n \"к\",\n \"о\",\n \"б\",\n \"и\",\n \"у\",\n \"ғ\",\n \"ж\",\n \"ң\",\n \"з\",\n \"ш\",\n \"й\",\n \"п\",\n \"г\",\n \"ө\",\n ],\n}"
},
{
"identifier": "KO_NAMES",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "KO_NAMES: Set[str] = {\"johab\", \"cp949\", \"euc_kr\"}"
},
{
"identifier": "LANGUAGE_SUPPORTED_COUNT",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)"
},
{
"identifier": "TOO_SMALL_SEQUENCE",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "TOO_SMALL_SEQUENCE: int = 32"
},
{
"identifier": "ZH_NAMES",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "ZH_NAMES: Set[str] = {\"big5\", \"cp950\", \"big5hkscs\", \"hz\"}"
},
{
"identifier": "is_suspiciously_successive_range",
"path": ".venv/Lib/site-packages/charset_normalizer/md.py",
"snippet": "@lru_cache(maxsize=1024)\ndef is_suspiciously_successive_range(\n unicode_range_a: Optional[str], unicode_range_b: Optional[str]\n) -> bool:\n \"\"\"\n Determine if two Unicode range seen next to each other can be considered as suspicious.\n \"\"\"\n if unicode_range_a is None or unicode_range_b is None:\n return True\n\n if unicode_range_a == unicode_range_b:\n return False\n\n if \"Latin\" in unicode_range_a and \"Latin\" in unicode_range_b:\n return False\n\n if \"Emoticons\" in unicode_range_a or \"Emoticons\" in unicode_range_b:\n return False\n\n # Latin characters can be accompanied with a combining diacritical mark\n # eg. Vietnamese.\n if (\"Latin\" in unicode_range_a or \"Latin\" in unicode_range_b) and (\n \"Combining\" in unicode_range_a or \"Combining\" in unicode_range_b\n ):\n return False\n\n keywords_range_a, keywords_range_b = unicode_range_a.split(\n \" \"\n ), unicode_range_b.split(\" \")\n\n for el in keywords_range_a:\n if el in UNICODE_SECONDARY_RANGE_KEYWORD:\n continue\n if el in keywords_range_b:\n return False\n\n # Japanese Exception\n range_a_jp_chars, range_b_jp_chars = (\n unicode_range_a\n in (\n \"Hiragana\",\n \"Katakana\",\n ),\n unicode_range_b in (\"Hiragana\", \"Katakana\"),\n )\n if (range_a_jp_chars or range_b_jp_chars) and (\n \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b\n ):\n return False\n if range_a_jp_chars and range_b_jp_chars:\n return False\n\n if \"Hangul\" in unicode_range_a or \"Hangul\" in unicode_range_b:\n if \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n # Chinese/Japanese use dedicated range for punctuation and/or separators.\n if (\"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b) or (\n unicode_range_a in [\"Katakana\", \"Hiragana\"]\n and unicode_range_b in [\"Katakana\", \"Hiragana\"]\n ):\n if \"Punctuation\" in unicode_range_a or \"Punctuation\" in unicode_range_b:\n return False\n if \"Forms\" in unicode_range_a or \"Forms\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n return True"
},
{
"identifier": "CoherenceMatches",
"path": ".venv/Lib/site-packages/charset_normalizer/models.py",
"snippet": "class CharsetMatch:\nclass CharsetMatches:\nclass CliDetectionResult:\n def __init__(\n self,\n payload: bytes,\n guessed_encoding: str,\n mean_mess_ratio: float,\n has_sig_or_bom: bool,\n languages: \"CoherenceMatches\",\n decoded_payload: Optional[str] = None,\n ):\n def __eq__(self, other: object) -> bool:\n def __lt__(self, other: object) -> bool:\n def multi_byte_usage(self) -> float:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def add_submatch(self, other: \"CharsetMatch\") -> None:\n def encoding(self) -> str:\n def encoding_aliases(self) -> List[str]:\n def bom(self) -> bool:\n def byte_order_mark(self) -> bool:\n def languages(self) -> List[str]:\n def language(self) -> str:\n def chaos(self) -> float:\n def coherence(self) -> float:\n def percent_chaos(self) -> float:\n def percent_coherence(self) -> float:\n def raw(self) -> bytes:\n def submatch(self) -> List[\"CharsetMatch\"]:\n def has_submatch(self) -> bool:\n def alphabets(self) -> List[str]:\n def could_be_from_charset(self) -> List[str]:\n def output(self, encoding: str = \"utf_8\") -> bytes:\n def fingerprint(self) -> str:\n def __init__(self, results: Optional[List[CharsetMatch]] = None):\n def __iter__(self) -> Iterator[CharsetMatch]:\n def __getitem__(self, item: Union[int, str]) -> CharsetMatch:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\n def append(self, item: CharsetMatch) -> None:\n def best(self) -> Optional[\"CharsetMatch\"]:\n def first(self) -> Optional[\"CharsetMatch\"]:\n def __init__(\n self,\n path: str,\n encoding: Optional[str],\n encoding_aliases: List[str],\n alternative_encodings: List[str],\n language: str,\n alphabets: List[str],\n has_sig_or_bom: bool,\n chaos: float,\n coherence: float,\n unicode_path: Optional[str],\n is_preferred: bool,\n ):\n def __dict__(self) -> Dict[str, Any]: # type: ignore\n def to_json(self) -> str:"
},
{
"identifier": "is_accentuated",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n or \"WITH MACRON\" in description\n or \"WITH RING ABOVE\" in description\n )"
},
{
"identifier": "is_latin",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description"
},
{
"identifier": "is_multi_byte_encoding",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=128)\ndef is_multi_byte_encoding(name: str) -> bool:\n \"\"\"\n Verify is a specific encoding is a multi byte one based on it IANA name\n \"\"\"\n return name in {\n \"utf_8\",\n \"utf_8_sig\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_32\",\n \"utf_32_le\",\n \"utf_32_be\",\n \"utf_7\",\n } or issubclass(\n importlib.import_module(\"encodings.{}\".format(name)).IncrementalDecoder,\n MultibyteIncrementalDecoder,\n )"
},
{
"identifier": "is_unicode_range_secondary",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))\ndef is_unicode_range_secondary(range_name: str) -> bool:\n return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)"
},
{
"identifier": "unicode_range",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None"
}
] | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
from .constant import (
FREQUENCIES,
KO_NAMES,
LANGUAGE_SUPPORTED_COUNT,
TOO_SMALL_SEQUENCE,
ZH_NAMES,
)
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
is_accentuated,
is_latin,
is_multi_byte_encoding,
is_unicode_range_secondary,
unicode_range,
) | 10,302 |
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module(
"encodings.{}".format(iana_name)
).IncrementalDecoder
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
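# Illustrative note (not part of the original module): for a single-byte code
# page the function above decodes bytes 0x40-0xFF and keeps every primary
# Unicode range that covers at least 15% of the decodable characters, e.g.
#   encoding_unicode_range("cp1251")   # might yield ['Basic Latin', 'Cyrillic']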
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function maps that correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
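# Sketch of the intended use (illustrative; the exact output depends on FREQUENCIES):
#   encoding_languages("latin_1")  # only Latin ranges found -> ["Latin Based"]
#   encoding_languages("cp1251")   # non-Latin primary range -> languages written with Cyrillic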
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function maps that correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
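# Quick examples that follow directly from the prefix rules above (illustrative only):
#   mb_encoding_languages("shift_jis")  # -> ["Japanese"]
#   mb_encoding_languages("gb18030")    # -> ["Chinese"]
#   mb_encoding_languages("utf_8")      # -> [] (no specific language association)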
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
    Determine the main aspects of a supported language: whether it contains accented characters and whether it is purely Latin-based.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
for character in FREQUENCIES[language]:
if not target_have_accents and is_accentuated(character):
target_have_accents = True
|
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module(
"encodings.{}".format(iana_name)
).IncrementalDecoder
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function maps that correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function maps that correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
    Determine the main aspects of a supported language: whether it contains accented characters and whether it is purely Latin-based.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
for character in FREQUENCIES[language]:
if not target_have_accents and is_accentuated(character):
target_have_accents = True | if target_pure_latin and is_latin(character) is False: | 8 | 2023-12-16 04:12:01+00:00 | 12k |
liebrandapps/FindMyGUI | main.py | [
{
"identifier": "AirTag",
"path": "airTag.py",
"snippet": "class AirTag:\n\n def __init__(self, ctx, jsonFile=None):\n self.log = ctx.log\n self.cfg = ctx.cfg\n self.__id = uuid.uuid4().hex\n self._name = \"\"\n self._privateKey = None\n self._advertisementKey = None\n self._hashedKey = None\n self._needsSave = False\n self._lastSeen = None\n self._latitude = None\n self._longitude = None\n self._history = {}\n self._imgId = \"airtag\"\n if jsonFile is None:\n airTagDir = ctx.cfg.general_airTagDirectory\n airTagSuffix = ctx.cfg.general_airTagSuffix\n self.fileName = join(airTagDir, self.__id + airTagSuffix)\n self._needsSave = True\n else:\n self.fileName = jsonFile\n self.load(jsonFile)\n\n @property\n def id(self):\n return self.__id\n\n def load(self, jsonFile):\n with open(jsonFile) as f:\n dta = json.load(f)\n self._name = dta['name']\n self._privateKey = base64.b64decode(dta['privateKey'])\n self._advertisementKey = base64.b64decode(dta['advertisementKey'])\n s256 = hashlib.sha256()\n s256.update(self._advertisementKey)\n self._hashedKey = base64.b64encode(s256.digest()).decode(\"ascii\")\n if 'id' in dta.keys():\n self.__id = dta['id']\n else:\n self.save()\n if 'lastSeen' in dta.keys():\n self._lastSeen = dta['lastSeen']\n self._longitude = dta['longitude']\n self._latitude = dta['latitude']\n if 'history' in dta.keys():\n self._history = dta['history']\n if 'imgId' in dta.keys():\n self._imgId = dta['imgId']\n self.log.info(f\"Loaded AirTag [{self._name} / {self.__id}] from file {self.fileName}\")\n self._needsSave = False\n\n def save(self):\n toRemove = []\n cutOff = datetime.now() - timedelta(days=self.cfg.general_history)\n for h in self._history.keys():\n if int(h) < cutOff.timestamp():\n toRemove.append(h)\n for r in toRemove:\n del self._history[r]\n j = self.toJSON()\n with open(self.fileName, 'w') as f:\n print(j, file=f)\n self.log.info(f\"Saved AirTag [{self._name} / {self.__id}] to file {self.fileName}\")\n self._needsSave = False\n\n @property\n def needsSave(self):\n return self._needsSave\n\n def toJSON(self):\n return json.dumps(self.toDict(), indent=4)\n\n def toDict(self):\n return {'name': self._name,\n 'privateKey': base64.b64encode(self._privateKey).decode('ascii'),\n 'advertisementKey': base64.b64encode(self._advertisementKey).decode('ascii'),\n 'lastSeen': self._lastSeen,\n 'longitude': self._longitude,\n 'latitude': self._latitude,\n 'history': self._history,\n 'imgId': self._imgId,\n 'id': self.id}\n\n def resolveTag(self, tag):\n value = \"notFound\"\n if tag == '##NAME##':\n value = self._name\n if tag == '##ID##':\n value = self.id\n if tag == '##LASTSEEN##':\n if self._lastSeen is None or int(self._lastSeen) == 0:\n value = \"Never\"\n else:\n value = datetime.utcfromtimestamp(self._lastSeen).strftime('%H:%M:%S %d.%m.%Y')\n return value\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._needsSave = self._needsSave or (value != self._name)\n self._name = value\n\n @property\n def privateKey(self):\n return base64.b64encode(self._privateKey).decode('ascii')\n\n @privateKey.setter\n def privateKey(self, value):\n v = base64.b64decode(value)\n self._needsSave = self._needsSave or (v != self._privateKey)\n self._privateKey = v\n\n @property\n def advertisementKey(self):\n return base64.b64encode(self._advertisementKey).decode('ascii')\n\n @advertisementKey.setter\n def advertisementKey(self, value):\n v = base64.b64decode(value)\n self._needsSave = self._needsSave or (v != self._advertisementKey)\n self._advertisementKey = v\n\n @property\n def 
hashedAdvKey(self):\n return self._hashedKey\n\n @property\n def lastSeen(self):\n return self._lastSeen\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n def updateLocation(self, when, latitude, longitude):\n if self._lastSeen is None or when > self._lastSeen:\n self._longitude = longitude\n self._latitude = latitude\n self._lastSeen = when\n self._history[when] = {'lat': latitude, 'lon': longitude}\n self._needsSave = True\n\n @property\n def history(self):\n return self._history\n\n @property\n def imgId(self):\n return self._imgId\n\n @imgId.setter\n def imgId(self, value):\n self._needsSave = self._needsSave or value != self.imgId\n self._imgId = value"
},
{
"identifier": "API",
"path": "api.py",
"snippet": "class API:\n\n def __init__(self, ctx):\n self.ctx = ctx\n self.log = ctx.log\n\n def call(self, cmd, params=None):\n self.log.debug(f\"[API] Handling API command <{cmd}>\")\n result = {}\n if cmd == \"listTags\":\n result = self._listTags()\n if cmd == 'getPos':\n result = self._getPos()\n if cmd == 'refresh':\n result = self._refresh()\n if cmd == 'getTagData':\n result = self._getTagData(params['id'][0])\n if cmd == 'editTag':\n result = self._editTag(params['id'][0], params['name'][0], params['privateKey'][0],\n params['advertisementKey'][0], params['imgId'][0])\n if cmd == 'addTag':\n result = self._addTag(params['id'][0], params['name'][0], params['privateKey'][0],\n params['advertisementKey'][0], params['imgId'][0])\n if cmd == 'signInStatus':\n result = self._signInStatus(int(params['timeStamp'][0]))\n if cmd == 'creds':\n result = self._creds(params['userName'][0], params['password'][0])\n if cmd == 'auth':\n result = self._auth(params['ndFactor'][0])\n if cmd == 'lastLocationUpdate':\n result = self._lastLocationUpdate()\n return json.dumps(result if result is not None else {})\n\n def _listTags(self):\n dct = {}\n for id in self.ctx.airtags.keys():\n dct[id] = self.ctx.airtags[id].toDict()\n return dct\n\n def _getPos(self):\n findMy = FindMy(self.ctx)\n data = findMy.retrieveLocations()\n return data\n\n def _refresh(self):\n self.ctx.signInDone = False\n findMy = FindMy(self.ctx)\n try:\n data = findMy.retrieveLocations()\n except requests.exceptions.ConnectTimeout as e:\n msg = f\"[API] Anisette Server not running: {str(e)}\"\n self.ctx.errMsg = msg\n self.ctx.log.error(msg)\n data = {\"status\": \"fail\", \"msg\": msg}\n return data\n\n def _getTagData(self, id):\n self.log.debug(f\"[API] Cmds' getTagData parameter is id={id}\")\n if id in self.ctx.airtags.keys():\n tag = self.ctx.airtags[id]\n dct = tag.toDict()\n dct['status'] = 'ok'\n else:\n dct = {'status': 'fail', 'msg': 'tag not found', 'id': id}\n return dct\n\n def _editTag(self, id, name, privKey, advKey, imgId):\n self.log.debug(f\"[API] Cmds' editTag parameter are id={id}, name={name}, private Key={privKey}, \"\n f\"advertisementKey={advKey}\")\n if id in self.ctx.airtags.keys():\n tag = self.ctx.airtags[id]\n tag.name = name\n tag.privateKey = privKey\n tag.advertisementKey = advKey\n tag.imgId = imgId\n if tag.needsSave:\n tag.save()\n dct = {'status': 'ok', 'dataChanged': str(tag.needsSave)}\n else:\n dct = {'status': 'fail', 'msg': 'tag not found', 'id': id}\n return dct\n\n def _addTag(self, id, name, privKey, advKey, imgId):\n self.log.debug(f\"[API] Cmds' addTag parameter are id={id}, name={name}, private Key={privKey}, \"\n f\"advertisementKey={advKey}\")\n tag = AirTag(self.ctx)\n tag.name = name\n tag.privateKey = privKey\n tag.advertisementKey = advKey\n tag.imgId = imgId\n tag.save()\n self.ctx.airtags[tag.id] = tag\n return {'status': 'ok', 'id': tag.id}\n\n def _signInStatus(self, timeStamp):\n self.log.debug(f\"[API] Cmds' signInStatus parameter is timeStamp={timeStamp}\")\n dct = {'status': 'wait', 'timeStamp': timeStamp}\n idx = 3\n while idx > 0:\n if self.ctx.signInDone:\n dct['status'] = \"done\"\n self.ctx.signInDone = False\n break\n elif len(self.ctx.errMsg) > 0:\n dct['status'] = \"fail\"\n dct['msg'] = self.ctx.errMsg\n self.ctx.errMsg = \"\"\n break\n elif self.ctx.requestCreds > timeStamp:\n dct['status'] = \"creds\"\n dct['timeStamp'] = self.ctx.requestCreds\n break\n elif self.ctx.requestAuth > timeStamp:\n dct['status'] = \"auth\"\n dct['timeStamp'] = 
self.ctx.requestAuth\n break\n idx -= 1\n time.sleep(1.0)\n return dct\n\n def _creds(self, userName, password):\n self.log.debug(f\"[API] Cmds' creds parameter are userName={userName}, password=(is set: {len(password) > 0})\")\n self.ctx.userName = userName\n self.ctx.password = password\n return {'status': 'ok'}\n\n def _auth(self, ndFactor):\n self.log.debug(f\"[API] Cmds' auth parameter are ndFactor={ndFactor}\")\n self.ctx.ndFactor = str(ndFactor)\n return {'status': 'ok'}\n\n def _lastLocationUpdate(self):\n return {'lastLocationUpdate': self.ctx.lastLocationUpdate}"
},
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n\n def __init__(self, cfgFile):\n self.cfg = RawConfigParser()\n _ = self.cfg.read(cfgFile)\n self.scope = {}\n self.lastget = None\n self.section = None\n\n def addScope(self, dictionary):\n for key in dictionary.keys():\n if key in self.scope.keys():\n self.scope[key].update(dictionary[key])\n else:\n self.scope[key] = dictionary[key]\n\n @staticmethod\n def hasKey(dct, key):\n k = key.upper()\n for d in dct:\n if d.upper() == k:\n return d\n return None\n\n def hasSection(self, section):\n return self.cfg.has_section(section)\n\n def hasOption(self, option):\n return self.cfg.has_option(self.section, option)\n\n #\n # name is one of the following:\n # - a single key(option), then section needs to be set before\n # - section_option\n def __getattr__(self, name):\n if self.lastget is None:\n # ok - now try section_option\n idx = name.split('_')\n if len(idx) > 1:\n # if we have more than one '_' in the string, section_option may be ambiguous\n tmpSection = idx[0]\n if tmpSection not in self.scope and len(idx) > 2:\n tmpSection = idx[0] + \"_\" + idx[1]\n idx[1] = \"_\".join(idx[2:])\n else:\n idx[1] = \"_\".join(idx[1:])\n if tmpSection in self.scope:\n option = idx[1]\n subScope = self.scope[tmpSection]\n if option in subScope:\n theTuple = subScope[option]\n if len(theTuple) > 1:\n defaultValue = [] if theTuple[0].upper().startswith('A') else theTuple[1]\n else:\n defaultValue = [] if theTuple[0].upper().startswith('A') else None\n if not(self.cfg.has_option(tmpSection, option)):\n return defaultValue\n if theTuple[0].startswith('S'):\n return self.cfg.get(tmpSection, option)\n if theTuple[0].startswith('I'):\n return self.cfg.getint(tmpSection, option)\n if theTuple[0].startswith('B'):\n return self.cfg.getboolean(tmpSection, option)\n if theTuple[0].startswith(\"F\"):\n return self.cfg.getfloat(tmpSection, option)\n if theTuple[0].upper().startswith('A'):\n return [] if self.cfg.get(tmpSection, option) is None \\\n else self.cfg.get(tmpSection, option).split(':')\n # target design: try section.option\n if self.lastget is None:\n if name in self.scope:\n self.lastget = name\n return self\n else:\n section = self.lastget\n self.lastget = None\n theTuple = self.scope[section][name]\n if not(self.cfg.has_section(section)):\n self.cfg.add_section(section)\n if not (self.cfg.has_option(section, name)) and len(theTuple) > 1:\n self.cfg.set(section, name, theTuple[1])\n if theTuple[0].upper().startswith('S'):\n return self.cfg.get(section, name)\n if theTuple[0].upper().startswith('I'):\n return self.cfg.getint(section, name)\n if theTuple[0].upper().startswith('B'):\n return self.cfg.getboolean(section, name)\n if theTuple[0].upper().startswith('A'):\n return [] if self.cfg.get(section, name) is None else self.cfg.get(section, name).split(':')\n return None\n\n def setSection(self, newSection):\n tmp = self.section\n self.section = newSection\n return tmp\n\n def readValue(self, key):\n return self.cfg.get(self.section, key)"
},
{
"identifier": "Context",
"path": "context.py",
"snippet": "class Context:\n statusFile = \"./findMyGUI.json\"\n\n def __init__(self, cfg, log):\n self.__log = log\n self.__cfg = cfg\n self.__threadMonitor = {}\n self.startTime = datetime.now()\n self.__airtags = {}\n self._signInDone = False\n self._requestCreds = 0\n self._requestAuth = 0\n self._userName = \"\"\n self._password = \"\"\n self._ndFactor = \"\"\n self._errMsg = \"\"\n self._lastLocationUpdate = 0\n\n def load(self):\n if exists(Context.statusFile):\n with open(Context.statusFile) as f:\n dta = json.load(f)\n self._lastLocationUpdate = dta['lastLocationUpdate']\n\n def save(self):\n j = {\"lastLocationUpdate\": self._lastLocationUpdate}\n with open(Context.statusFile, 'w') as f:\n print(json.dumps(j, indent=4), file=f)\n\n @property\n def log(self):\n return self.__log\n\n @property\n def cfg(self):\n return self.__cfg\n\n @property\n def airtags(self):\n return self.__airtags\n\n @property\n def threadMonitor(self):\n return self.__threadMonitor\n\n def checkThreads(self, now):\n missing = []\n for k in self.__threadMonitor.keys():\n if (now - self.__threadMonitor[k][0]).seconds > 900:\n # thread has not updated since 15 minutes\n self.__log.warn(\"[CTX] Thread for class %s has not sent an alive message for %d seconds\" %\n (k, (now - self.__threadMonitor[k][0]).seconds))\n missing.append(self.__threadMonitor[k])\n return missing\n\n def uptime(self, now):\n days = (now - self.startTime).days\n secs = (now - self.startTime).seconds\n hours = int((secs % 86400) / 3600)\n minutes = int((secs % 3600) / 60)\n seconds = int(secs % 60)\n\n up = \"\"\n if days > 0:\n up += str(days) + \" \" + (days == 1 and \"day\" or \"days\") + \", \"\n if len(up) > 0 or hours > 0:\n up += str(hours) + \" \" + (hours == 1 and \"hour\" or \"hours\") + \", \"\n if len(up) > 0 or minutes > 0:\n up += str(minutes) + \" \" + (minutes == 1 and \"minute\" or \"minutes\") + \", \"\n up += str(seconds) + \" \" + (seconds == 1 and \"second\" or \"seconds\")\n return up\n\n @property\n def requestCreds(self):\n return self._requestCreds\n\n @requestCreds.setter\n def requestCreds(self, value):\n self._requestCreds = value\n\n @property\n def requestAuth(self):\n return self._requestAuth\n\n @requestAuth.setter\n def requestAuth(self, value):\n self._requestAuth = value\n\n @property\n def signInDone(self):\n return self._signInDone\n\n @signInDone.setter\n def signInDone(self, value):\n self._signInDone = value\n\n @property\n def userName(self):\n return self._userName\n\n @userName.setter\n def userName(self, value):\n self._userName = value\n\n @property\n def password(self):\n return self._password\n\n @password.setter\n def password(self, value):\n self._password = value\n\n @property\n def ndFactor(self):\n return self._ndFactor\n\n @ndFactor.setter\n def ndFactor(self, value):\n self._ndFactor = value\n\n @property\n def errMsg(self):\n return self._errMsg\n\n @errMsg.setter\n def errMsg(self, value):\n self._errMsg = value\n\n @property\n def lastLocationUpdate(self):\n return self._lastLocationUpdate\n\n @lastLocationUpdate.setter\n def lastLocationUpdate(self, value):\n self._lastLocationUpdate = value\n self.save()"
},
{
"identifier": "Daemon",
"path": "daemon.py",
"snippet": "class Daemon:\n\n def __init__(self, pidFile, app, logFile):\n self.pidFile = pidFile\n self.logFile = logFile\n self.app = app\n\n @staticmethod\n def getTimeStamp():\n return time.strftime('%d.%m.%Y %H:%M:%S', time.localtime(time.time()))\n\n @staticmethod\n def printLogLine(file, message):\n file.write('%s %s\\n' % (Daemon.getTimeStamp(), message))\n file.flush()\n\n def startstop(self, todo, stdout=\"/dev/null\", stderr=None, stdin=\"/dev/null\"):\n try:\n pf = open(self.pidFile, 'r')\n pid = int(pf.read().strip())\n pf.close()\n except IOError:\n pid = None\n\n if 'stop' == todo or 'restart' == todo:\n if not pid:\n msg = \"[%s] Could not stop. Pidfile %s is missing\\n\" % (self.app, self.pidFile)\n Daemon.printLogLine(sys.stderr, msg)\n sys.exit(1)\n Daemon.printLogLine(sys.stdout, \"[%s] Stopping Process with PID %d\" % (self.app, pid))\n try:\n cnt = 10\n while 1:\n if cnt < 0:\n os.kill(pid, signal.SIGKILL)\n else:\n os.kill(pid, signal.SIGTERM)\n time.sleep(3)\n cnt -= 1\n except OSError as err:\n err = str(err)\n if err.find(\"No such process\") > 0:\n if \"stop\" == todo:\n if os.path.exists(self.pidFile):\n os.remove(self.pidFile)\n sys.exit(0)\n todo = \"start\"\n pid = None\n else:\n print(str(err))\n sys.exit(1)\n if 'start' == todo:\n if pid:\n msg = \"[%s] Start aborted since Pidfile %s exists\" % self.app\n Daemon.printLogLine(sys.stderr, msg % self.pidFile)\n sys.exit(1)\n Daemon.printLogLine(sys.stdout, \"[%s] Starting Process as Daemon\" % self.app)\n self.daemonize(stdout, stderr, stdin)\n if 'status' == todo:\n if pid:\n logFileStatus = os.path.exists(self.logFile)\n if logFileStatus:\n (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(self.logFile)\n logLastModified = time.ctime(mtime)\n else:\n logLastModified = \"never\"\n if psutil.pid_exists(pid):\n process = psutil.Process(pid)\n with process.oneshot():\n msg = \"[%s] Process with pid %d is running [%s], last log update [%s]\" \\\n % (self.app, pid, process.name(), logLastModified)\n self.printLogLine(sys.stdout, msg)\n sys.exit(0)\n else:\n msg = \"[%s] Process with pid %d is NOT running, but we have a PID file - maybe it crashed. Last \" \\\n \"log update [%s]\" % (self.app, pid, logLastModified)\n self.printLogLine(sys.stdout, msg)\n if os.path.exists(self.pidFile):\n os.remove(self.pidFile)\n sys.exit(3)\n else:\n msg = \"[%s] Process seems to be not running - no PIDFile (%s) found.\" % (self.app, self.pidFile)\n self.printLogLine(sys.stderr, msg)\n sys.exit(0)\n\n def daemonize(self, stdout='/dev/null', stderr=None, stdin='/dev/null'):\n if not stderr:\n stderr = stdout\n si = open(stdin, 'r')\n so = open(stdout, 'a+')\n se = open(stderr, 'a+')\n\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"[%s] fork #1 failed (%d) %s\" % (self.app, e.errno, e.strerror))\n sys.exit(1)\n\n os.umask(0)\n os.setsid()\n\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"[%s] fork #2 failed (%d) %s\" % (self.app, e.errno, e.strerror))\n sys.exit(1)\n pid = str(os.getpid())\n self.printLogLine(sys.stdout, \"[%s] Process started as Daemon with pid %s\" % (self.app, pid))\n if self.pidFile:\n open(self.pidFile, 'w+').write('%s\\n' % pid)"
}
] | import glob
import logging
import signal
import sys
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
from logging.handlers import RotatingFileHandler
from os import makedirs
from os.path import join, exists, splitext
from threading import Thread
from urllib.parse import parse_qs, urlparse
from airTag import AirTag
from api import API
from config import Config
from context import Context
from daemon import Daemon | 7,320 | loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
if cfg.logging_stdout and not runAsDaemon:
loghdl = logging.StreamHandler(sys.stdout)
loghdl.setFormatter(logging.Formatter(cfg.logging_msgFormat))
loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
_log.disabled = False
return _log
except Exception as e:
print("[%s] Unable to initialize logging. Reason: %s" % (APP, e))
return None
def terminate(sigNo, _):
global doTerminate
global myServer
global httpIsRunning
if doTerminate:
return
doTerminate = True
ctx.log.info(f"[{APP}] Terminating with Signal {sigNo} {sigs[sigNo]}")
if httpIsRunning:
Thread(target=myServer.shutdown).start()
def loadAirTags():
global ctx
airTagDir = ctx.cfg.general_airTagDirectory
airTagSuffix = ctx.cfg.general_airTagSuffix
if not exists(airTagDir):
ctx.log.info(
f"[loadAirTags] Airtags Directory '{airTagDir}' does not exist, creating it. This will be used to store Airtag key information.")
makedirs(airTagDir)
tags = glob.glob(join(airTagDir, '*' + airTagSuffix))
for t in tags:
airtag = AirTag(ctx, jsonFile=t)
ctx.airtags[airtag.id] = airtag
class FindMyServer(BaseHTTPRequestHandler):
    ''' Maps file extension to [content type, encode-as-UTF-8 flag]. '''
contentTypeDct = {'.html': ["text/html", True],
'.js': ["application/javascript", True],
'.css': ["text/css", True],
'.png': ["image/png", False],
}
def do_GET(self):
if self.path.startswith('/api'):
api = API(ctx)
query_components = parse_qs(urlparse(self.path).query)
cmd = query_components["command"]
result = api.call(cmd[0], params=query_components)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(result.encode('UTF-8'))
else:
file = "/index.html" if self.path == "/" else self.path
file = join('www', file[1:])
ext = splitext(file)[1]
ct = self.contentTypeDct[ext] if ext in self.contentTypeDct.keys() else None
if exists(file) and ct is not None:
contentType = ct[0]
encode = ct[1]
self.send_response(200)
self.send_header("Content-type", contentType)
self.end_headers()
with open(file, 'r' if encode else 'rb') as f:
data = f.read()
self.wfile.write(data.encode('UTF-8') if encode else data)
else:
self.send_response(404)
self.end_headers()
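# --- Illustrative client sketch (not part of main.py) ------------------------
# With the server running on the default httpPort (8008, see initialConfig
# below), the JSON API served by FindMyServer.do_GET()/api.API can be exercised
# from the standard library; "listTags" is one of the commands API.call() handles.
#
#   import json, urllib.request
#   with urllib.request.urlopen("http://localhost:8008/api?command=listTags") as resp:
#       print(json.loads(resp.read().decode("utf-8")))
# ------------------------------------------------------------------------------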
if __name__ == '__main__':
doTerminate = False
initialConfig = {
"general": {
"httpHost": ['String', '0.0.0.0'],
"httpPort": ['Integer', 8008],
"httpFiles": ['String', 'www'],
"anisetteHost": ['String', 'http://192.168.2.15'],
"anisettePort": ['Integer', 6969],
"airTagDirectory": ['String', 'airtags'],
"airTagSuffix": ['String', '.json'],
"history": ["Integer", 30],
},
"logging": {
"logFile": ["String", "/tmp/findMyGUI.log"],
"maxFilesize": ["Integer", 1000000],
"msgFormat": ["String", "%(asctime)s, %(levelname)s, %(module)s {%(process)d}, %(lineno)d, %(message)s"],
"logLevel": ["Integer", 10],
"stdout": ["Boolean", True],
},
"appleId": {
"appleId": ["String", ''],
"password": ["String", ''],
"trustedDevice": ["Boolean", False],
}
}
path = join(CONFIG_DIR, CONFIG_FILE)
if not (exists(path)):
print(f"[{APP}] No config file {CONFIG_FILE} found at {CONFIG_DIR}, using defaults")
cfg = Config(path)
cfg.addScope(initialConfig)
runAsDaemon = False
if len(sys.argv) > 1:
todo = sys.argv[1]
if todo in ['start', 'stop', 'restart', 'status']:
runAsDaemon = True
pidFile = cfg.general_pidFile
logFile = cfg.logging_logFile
d = Daemon(pidFile, APP, logFile)
d.startstop(todo, stdout=logFile, stderr=logFile)
log = setupLogger()
if log is None:
sys.exit(-126)
| """
Mark Liebrand 2024
This file is part of FindMyGUI which is released under the Apache 2.0 License
See file LICENSE or go to for full license details https://github.com/liebrandapps/FindMyGUI
"""
APP = "findMyGUI"
CONFIG_DIR = "./"
CONFIG_FILE = "findMyGUI.ini"
def setupLogger():
global runAsDaemon
try:
_log = logging.Logger(APP)
loghdl = RotatingFileHandler(cfg.logging_logFile, 'a', cfg.logging_maxFilesize, 4)
loghdl.setFormatter(logging.Formatter(cfg.logging_msgFormat))
loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
if cfg.logging_stdout and not runAsDaemon:
loghdl = logging.StreamHandler(sys.stdout)
loghdl.setFormatter(logging.Formatter(cfg.logging_msgFormat))
loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
_log.disabled = False
return _log
except Exception as e:
print("[%s] Unable to initialize logging. Reason: %s" % (APP, e))
return None
def terminate(sigNo, _):
global doTerminate
global myServer
global httpIsRunning
if doTerminate:
return
doTerminate = True
ctx.log.info(f"[{APP}] Terminating with Signal {sigNo} {sigs[sigNo]}")
if httpIsRunning:
Thread(target=myServer.shutdown).start()
def loadAirTags():
global ctx
airTagDir = ctx.cfg.general_airTagDirectory
airTagSuffix = ctx.cfg.general_airTagSuffix
if not exists(airTagDir):
ctx.log.info(
f"[loadAirTags] Airtags Directory '{airTagDir}' does not exist, creating it. This will be used to store Airtag key information.")
makedirs(airTagDir)
tags = glob.glob(join(airTagDir, '*' + airTagSuffix))
for t in tags:
airtag = AirTag(ctx, jsonFile=t)
ctx.airtags[airtag.id] = airtag
class FindMyServer(BaseHTTPRequestHandler):
    ''' Maps file extension to [content type, encode-as-UTF-8 flag]. '''
contentTypeDct = {'.html': ["text/html", True],
'.js': ["application/javascript", True],
'.css': ["text/css", True],
'.png': ["image/png", False],
}
def do_GET(self):
if self.path.startswith('/api'):
api = API(ctx)
query_components = parse_qs(urlparse(self.path).query)
cmd = query_components["command"]
result = api.call(cmd[0], params=query_components)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(result.encode('UTF-8'))
else:
file = "/index.html" if self.path == "/" else self.path
file = join('www', file[1:])
ext = splitext(file)[1]
ct = self.contentTypeDct[ext] if ext in self.contentTypeDct.keys() else None
if exists(file) and ct is not None:
contentType = ct[0]
encode = ct[1]
self.send_response(200)
self.send_header("Content-type", contentType)
self.end_headers()
with open(file, 'r' if encode else 'rb') as f:
data = f.read()
self.wfile.write(data.encode('UTF-8') if encode else data)
else:
self.send_response(404)
self.end_headers()
if __name__ == '__main__':
doTerminate = False
initialConfig = {
"general": {
"httpHost": ['String', '0.0.0.0'],
"httpPort": ['Integer', 8008],
"httpFiles": ['String', 'www'],
"anisetteHost": ['String', 'http://192.168.2.15'],
"anisettePort": ['Integer', 6969],
"airTagDirectory": ['String', 'airtags'],
"airTagSuffix": ['String', '.json'],
"history": ["Integer", 30],
},
"logging": {
"logFile": ["String", "/tmp/findMyGUI.log"],
"maxFilesize": ["Integer", 1000000],
"msgFormat": ["String", "%(asctime)s, %(levelname)s, %(module)s {%(process)d}, %(lineno)d, %(message)s"],
"logLevel": ["Integer", 10],
"stdout": ["Boolean", True],
},
"appleId": {
"appleId": ["String", ''],
"password": ["String", ''],
"trustedDevice": ["Boolean", False],
}
}
path = join(CONFIG_DIR, CONFIG_FILE)
if not (exists(path)):
print(f"[{APP}] No config file {CONFIG_FILE} found at {CONFIG_DIR}, using defaults")
cfg = Config(path)
cfg.addScope(initialConfig)
runAsDaemon = False
if len(sys.argv) > 1:
todo = sys.argv[1]
if todo in ['start', 'stop', 'restart', 'status']:
runAsDaemon = True
pidFile = cfg.general_pidFile
logFile = cfg.logging_logFile
d = Daemon(pidFile, APP, logFile)
d.startstop(todo, stdout=logFile, stderr=logFile)
log = setupLogger()
if log is None:
sys.exit(-126) | ctx = Context(cfg, log) | 3 | 2023-12-16 12:39:52+00:00 | 12k |
YaoFANGUK/video-subtitle-remover | backend/scenedetect/backends/opencv.py | [
{
"identifier": "FrameTimecode",
"path": "backend/scenedetect/frame_timecode.py",
"snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n 
\"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' 
in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n 
type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num"
},
{
"identifier": "MAX_FPS_DELTA",
"path": "backend/scenedetect/frame_timecode.py",
"snippet": "MAX_FPS_DELTA: float = 1.0 / 100000"
},
{
"identifier": "get_file_name",
"path": "backend/scenedetect/platform.py",
"snippet": "def get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\n \"\"\"Return the file name that `file_path` refers to, optionally removing the extension.\n\n If `include_extension` is False, the result will always be a str.\n\n E.g. /tmp/foo.bar -> foo\"\"\"\n file_name = os.path.basename(file_path)\n if not include_extension:\n file_name = str(file_name)\n last_dot_pos = file_name.rfind('.')\n if last_dot_pos >= 0:\n file_name = file_name[:last_dot_pos]\n return file_name"
},
{
"identifier": "VideoStream",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. 
if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "SeekError",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class SeekError(Exception):\n \"\"\"Either an unrecoverable error happened while attempting to seek, or the underlying\n stream is not seekable (additional information will be provided when possible).\n\n The stream is guaranteed to be left in a valid state, but the position may be reset.\"\"\""
},
{
"identifier": "VideoOpenFailure",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class VideoOpenFailure(Exception):\n \"\"\"Raised by a backend if opening a video fails.\"\"\"\n\n # pylint: disable=useless-super-delegation\n def __init__(self, message: str = \"Unknown backend error.\"):\n \"\"\"\n Arguments:\n message: Additional context the backend can provide for the open failure.\n \"\"\"\n super().__init__(message)\n\n # pylint: enable=useless-super-delegation"
},
{
"identifier": "FrameRateUnavailable",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class FrameRateUnavailable(VideoOpenFailure):\n \"\"\"Exception instance to provide consistent error messaging across backends when the video frame\n rate is unavailable or cannot be calculated. Subclass of VideoOpenFailure.\"\"\"\n\n def __init__(self):\n super().__init__('Unable to obtain video framerate! Specify `framerate` manually, or'\n ' re-encode/re-mux the video and try again.')"
}
] | from logging import getLogger
from typing import AnyStr, Tuple, Union, Optional
from numpy import ndarray
from backend.scenedetect.frame_timecode import FrameTimecode, MAX_FPS_DELTA
from backend.scenedetect.platform import get_file_name
from backend.scenedetect.video_stream import VideoStream, SeekError, VideoOpenFailure, FrameRateUnavailable
import math
import os.path
import cv2 | 9,093 | For 1-based indices (first frame is frame #1), the target frame number needs to be converted
to 0-based by subtracting one. For example, if we want to seek to the first frame, we call
seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed
by read(), at which point frame_number will be 5.
Not supported if the VideoStream is a device/camera. Untested with web streams.
Arguments:
target: Target position in video stream to seek to.
If float, interpreted as time in seconds.
If int, interpreted as frame number.
Raises:
SeekError: An error occurs while seeking, or seeking is not supported.
ValueError: `target` is not a valid value (i.e. it is negative).
"""
if self._is_device:
raise SeekError("Cannot seek if input is a device!")
if target < 0:
raise ValueError("Target seek position cannot be negative!")
        # Have to seek one frame behind and call grab() afterwards so that the VideoCapture
# returns a valid timestamp when using CAP_PROP_POS_MSEC.
target_frame_cv2 = (self.base_timecode + target).get_frames()
if target_frame_cv2 > 0:
target_frame_cv2 -= 1
self._cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame_cv2)
self._has_grabbed = False
# Preemptively grab the frame behind the target position if possible.
if target > 0:
self._has_grabbed = self._cap.grab()
# If we seeked past the end of the video, need to seek one frame backwards
# from the current position and grab that frame instead.
if not self._has_grabbed:
seek_pos = round(self._cap.get(cv2.CAP_PROP_POS_FRAMES) - 1.0)
self._cap.set(cv2.CAP_PROP_POS_FRAMES, max(0, seek_pos))
self._has_grabbed = self._cap.grab()
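    # Illustrative sketch of the 0-based seek semantics documented above
    # ("video.mp4" is a placeholder path, not taken from the original source):
    #   stream = VideoStreamCv2("video.mp4")
    #   stream.seek(0); first = stream.read()   # decodes frame #1 (frame_number == 1)
    #   stream.seek(4); fifth = stream.read()   # decodes frame #5 (frame_number == 5)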
def reset(self):
""" Close and re-open the VideoStream (should be equivalent to calling `seek(0)`). """
self._cap.release()
self._open_capture(self._frame_rate)
def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:
"""Read and decode the next frame as a numpy.ndarray. Returns False when video ends,
        or the maximum number of decode attempts has been exceeded.
Arguments:
decode: Decode and return the frame.
advance: Seek to the next frame. If False, will return the current (last) frame.
Returns:
If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.
            If decode = False, a bool indicating if advancing to the next frame succeeded.
"""
if not self._cap.isOpened():
return False
# Grab the next frame if possible.
if advance:
has_grabbed = self._cap.grab()
# If we failed to grab the frame, retry a few times if required.
if not has_grabbed:
if self.duration > 0 and self.position < (self.duration - 1):
for _ in range(self._max_decode_attempts):
has_grabbed = self._cap.grab()
if has_grabbed:
break
# Report previous failure in debug mode.
if has_grabbed:
self._decode_failures += 1
logger.debug('Frame failed to decode.')
if not self._warning_displayed and self._decode_failures > 1:
logger.warning('Failed to decode some frames, results may be inaccurate.')
# We didn't manage to grab a frame even after retrying, so just return.
if not has_grabbed:
return False
self._has_grabbed = True
# Need to make sure we actually grabbed a frame before calling retrieve.
if decode and self._has_grabbed:
_, frame = self._cap.retrieve()
return frame
return self._has_grabbed
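    # Typical consumption pattern (sketch, given a VideoStreamCv2 instance `stream`):
    # read() yields decoded frames as numpy.ndarray (BGR order, as retrieved by
    # OpenCV) and returns False once the stream is exhausted.
    #   while True:
    #       frame = stream.read()
    #       if frame is False:
    #           break
    #       ...  # process `frame` here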
#
# Private Methods
#
def _open_capture(self, framerate: Optional[float] = None):
"""Opens capture referenced by this object and resets internal state."""
if self._is_device and self._path_or_device < 0:
raise ValueError('Invalid/negative device ID specified.')
input_is_video_file = not self._is_device and not any(
identifier in self._path_or_device for identifier in NON_VIDEO_FILE_INPUT_IDENTIFIERS)
# We don't have a way of querying why opening a video fails (errors are logged at least),
# so provide a better error message if we try to open a file that doesn't exist.
if input_is_video_file:
if not os.path.exists(self._path_or_device):
raise OSError('Video file not found.')
cap = cv2.VideoCapture(self._path_or_device)
if not cap.isOpened():
raise VideoOpenFailure(
'Ensure file is valid video and system dependencies are up to date.\n')
# Display an error if the video codec type seems unsupported (#86) as this indicates
# potential video corruption, or may explain missing frames. We only perform this check
# for video files on-disk (skipped for devices, image sequences, streams, etc...).
codec_unsupported: bool = (int(abs(cap.get(cv2.CAP_PROP_FOURCC))) == 0)
if codec_unsupported and input_is_video_file:
logger.error('Video codec detection failed. If output is incorrect:\n'
' - Re-encode the input video with ffmpeg\n'
' - Update OpenCV (pip install --upgrade opencv-python)\n'
' - Use the PyAV backend (--backend pyav)\n'
'For details, see https://github.com/Breakthrough/PySceneDetect/issues/86')
# Ensure the framerate is correct to avoid potential divide by zero errors. This can be
# addressed in the PyAV backend if required since it supports integer timebases.
assert framerate is None or framerate > MAX_FPS_DELTA, "Framerate must be validated if set!"
if framerate is None:
framerate = cap.get(cv2.CAP_PROP_FPS)
if framerate < MAX_FPS_DELTA:
| # -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# -------------------------------------------------------------------
# [ Site: https://scenedetect.com ]
# [ Docs: https://scenedetect.com/docs/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
#
# Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>.
# PySceneDetect is licensed under the BSD 3-Clause License; see the
# included LICENSE file, or visit one of the above pages for details.
#
""":class:`VideoStreamCv2` is backed by the OpenCV `VideoCapture` object. This is the default
backend. Works with video files, image sequences, and network streams/URLs.
For wrapping input devices or pipes, there is also :class:`VideoCaptureAdapter` which can be
constructed from an existing `cv2.VideoCapture`. This allows performing scene detection on inputs
which do not support seeking.
"""
logger = getLogger('pyscenedetect')
IMAGE_SEQUENCE_IDENTIFIER = '%'
NON_VIDEO_FILE_INPUT_IDENTIFIERS = (
IMAGE_SEQUENCE_IDENTIFIER, # image sequence
'://', # URL/network stream
' ! ', # gstreamer pipe
)
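# --- Editor's sketch (illustrative addition, not part of the original PySceneDetect source) ---
# These identifiers are substring markers used by _open_capture below to decide whether an
# input string is a plain video file on disk. A condensed version of that check:
def _looks_like_video_file(path: str) -> bool:
    return not any(identifier in path for identifier in NON_VIDEO_FILE_INPUT_IDENTIFIERS)

assert _looks_like_video_file('movie.mp4')                     # regular file on disk
assert not _looks_like_video_file('frames/DSC_%04d.jpg')       # image sequence ('%')
assert not _looks_like_video_file('rtsp://camera.local/feed')  # network stream ('://')
# --- end of sketch ---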
def _get_aspect_ratio(cap: cv2.VideoCapture, epsilon: float = 0.0001) -> float:
"""Display/pixel aspect ratio of the VideoCapture as a float (1.0 represents square pixels)."""
# Versions of OpenCV < 3.4.1 do not support this, so we fall back to 1.0.
if not 'CAP_PROP_SAR_NUM' in dir(cv2):
return 1.0
num: float = cap.get(cv2.CAP_PROP_SAR_NUM)
den: float = cap.get(cv2.CAP_PROP_SAR_DEN)
# If the numerator or denominator is close to zero, fall back to 1.0.
if abs(num) < epsilon or abs(den) < epsilon:
return 1.0
return num / den
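# --- Editor's sketch (illustrative addition, not part of the original PySceneDetect source) ---
# One common use of a pixel (sample) aspect ratio is correcting the display width of
# anamorphic video; this assumes that use and is not a claim about how the library itself
# consumes the value.
def corrected_display_width(stored_width: int, pixel_aspect_ratio: float) -> int:
    # e.g. 720-pixel-wide DV widescreen with a PAR of ~1.21 displays at roughly 872 pixels
    return round(stored_width * pixel_aspect_ratio)
# --- end of sketch ---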
class VideoStreamCv2(VideoStream):
"""OpenCV `cv2.VideoCapture` backend."""
def __init__(
self,
path: AnyStr = None,
framerate: Optional[float] = None,
max_decode_attempts: int = 5,
path_or_device: Union[bytes, str, int] = None,
):
"""Open a video file, image sequence, or network stream.
Arguments:
path: Path to the video. Can be a file, image sequence (`'folder/DSC_%04d.jpg'`),
or network stream.
framerate: If set, overrides the detected framerate.
max_decode_attempts: Number of attempts to continue decoding the video
after a frame fails to decode. This allows processing videos that
have a few corrupted frames or metadata (in which case accuracy
of detection algorithms may be lower). Once this limit is passed,
decoding will stop and emit an error.
path_or_device: [DEPRECATED] Specify `path` for files, image sequences, or
network streams/URLs. Use `VideoCaptureAdapter` for devices/pipes.
Raises:
OSError: file could not be found or access was denied
VideoOpenFailure: video could not be opened (may be corrupted)
ValueError: specified framerate is invalid
"""
super().__init__()
# TODO(v0.7): Replace with DeprecationWarning that `path_or_device` will be removed in v0.8.
if path_or_device is not None:
logger.error('path_or_device is deprecated, use path or VideoCaptureAdapter instead.')
path = path_or_device
if path is None:
raise ValueError('Path must be specified!')
if framerate is not None and framerate < MAX_FPS_DELTA:
raise ValueError('Specified framerate (%f) is invalid!' % framerate)
if max_decode_attempts < 0:
raise ValueError('Maximum decode attempts must be >= 0!')
self._path_or_device = path
self._is_device = isinstance(self._path_or_device, int)
# Initialized in _open_capture:
self._cap: Optional[
cv2.VideoCapture] = None # Reference to underlying cv2.VideoCapture object.
self._frame_rate: Optional[float] = None
# VideoCapture state
self._has_grabbed = False
self._max_decode_attempts = max_decode_attempts
self._decode_failures = 0
self._warning_displayed = False
self._open_capture(framerate)
#
# Backend-Specific Methods/Properties
#
@property
def capture(self) -> cv2.VideoCapture:
"""Returns reference to underlying VideoCapture object. Use with caution.
Prefer to use this property only to take ownership of the underlying cv2.VideoCapture object
backing this object. Seeking or using the read/grab methods through this property are
unsupported and will leave this object in an inconsistent state.
"""
assert self._cap
return self._cap
#
# VideoStream Methods/Properties
#
BACKEND_NAME = 'opencv'
"""Unique name used to identify this backend."""
@property
def frame_rate(self) -> float:
"""Framerate in frames/sec."""
assert self._frame_rate
return self._frame_rate
@property
def path(self) -> Union[bytes, str]:
"""Video or device path."""
if self._is_device:
assert isinstance(self._path_or_device, (int))
return "Device %d" % self._path_or_device
assert isinstance(self._path_or_device, (bytes, str))
return self._path_or_device
@property
def name(self) -> str:
"""Name of the video, without extension, or device."""
if self._is_device:
return self.path
file_name: str = get_file_name(self.path, include_extension=False)
if IMAGE_SEQUENCE_IDENTIFIER in file_name:
# file_name is an image sequence, trim everything including/after the %.
# TODO: This excludes any suffix after the sequence identifier.
file_name = file_name[:file_name.rfind(IMAGE_SEQUENCE_IDENTIFIER)]
return file_name
@property
def is_seekable(self) -> bool:
"""True if seek() is allowed, False otherwise.
Always False if opening a device/webcam."""
return not self._is_device
@property
def frame_size(self) -> Tuple[int, int]:
"""Size of each video frame in pixels as a tuple of (width, height)."""
return (math.trunc(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
math.trunc(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
@property
def duration(self) -> Optional[FrameTimecode]:
"""Duration of the stream as a FrameTimecode, or None if non terminating."""
if self._is_device:
return None
return self.base_timecode + math.trunc(self._cap.get(cv2.CAP_PROP_FRAME_COUNT))
@property
def aspect_ratio(self) -> float:
"""Display/pixel aspect ratio as a float (1.0 represents square pixels)."""
return _get_aspect_ratio(self._cap)
@property
def position(self) -> FrameTimecode:
"""Current position within stream as FrameTimecode.
This can be interpreted as presentation time stamp of the last frame which was
decoded by calling `read` with advance=True.
This method will always return 0 (e.g. be equal to `base_timecode`) if no frames
have been `read`."""
if self.frame_number < 1:
return self.base_timecode
return self.base_timecode + (self.frame_number - 1)
@property
def position_ms(self) -> float:
"""Current position within stream as a float of the presentation time in milliseconds.
The first frame has a time of 0.0 ms.
This method will always return 0.0 if no frames have been `read`."""
return self._cap.get(cv2.CAP_PROP_POS_MSEC)
@property
def frame_number(self) -> int:
"""Current position within stream in frames as an int.
1 indicates the first frame was just decoded by the last call to `read` with advance=True,
whereas 0 indicates that no frames have been `read`.
This method will always return 0 if no frames have been `read`."""
return math.trunc(self._cap.get(cv2.CAP_PROP_POS_FRAMES))
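# --- Editor's sketch (illustrative addition, not part of the original PySceneDetect source) ---
# How the three position accessors relate after decoding some frames ('example.mp4' is a
# placeholder path; assumes the video has at least 10 frames):
stream = VideoStreamCv2('example.mp4')
for _ in range(10):
    stream.read()
print(stream.frame_number)     # 10 -> 1-based index of the last decoded frame
print(stream.position)         # base_timecode + 9 -> its 0-based presentation timecode
print(stream.position_ms)      # the same instant expressed in milliseconds
# --- end of sketch ---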
def seek(self, target: Union[FrameTimecode, float, int]):
"""Seek to the given timecode. If given as a frame number, represents the current seek
pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).
For 1-based indices (first frame is frame #1), the target frame number needs to be converted
to 0-based by subtracting one. For example, if we want to seek to the first frame, we call
seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed
by read(), at which point frame_number will be 5.
Not supported if the VideoStream is a device/camera. Untested with web streams.
Arguments:
target: Target position in video stream to seek to.
If float, interpreted as time in seconds.
If int, interpreted as frame number.
Raises:
SeekError: An error occurs while seeking, or seeking is not supported.
ValueError: `target` is not a valid value (i.e. it is negative).
"""
if self._is_device:
raise SeekError("Cannot seek if input is a device!")
if target < 0:
raise ValueError("Target seek position cannot be negative!")
# Have to seek one behind and call grab() after so that the VideoCapture
# returns a valid timestamp when using CAP_PROP_POS_MSEC.
target_frame_cv2 = (self.base_timecode + target).get_frames()
if target_frame_cv2 > 0:
target_frame_cv2 -= 1
self._cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame_cv2)
self._has_grabbed = False
# Preemptively grab the frame behind the target position if possible.
if target > 0:
self._has_grabbed = self._cap.grab()
# If we sought past the end of the video, we need to seek one frame backwards
# from the current position and grab that frame instead.
if not self._has_grabbed:
seek_pos = round(self._cap.get(cv2.CAP_PROP_POS_FRAMES) - 1.0)
self._cap.set(cv2.CAP_PROP_POS_FRAMES, max(0, seek_pos))
self._has_grabbed = self._cap.grab()
def reset(self):
""" Close and re-open the VideoStream (should be equivalent to calling `seek(0)`). """
self._cap.release()
self._open_capture(self._frame_rate)
def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:
"""Read and decode the next frame as a numpy.ndarray. Returns False when video ends,
or the maximum number of decode attempts has passed.
Arguments:
decode: Decode and return the frame.
advance: Seek to the next frame. If False, will return the current (last) frame.
Returns:
If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.
If decode = False, a bool indicating if advancing to the next frame succeeded.
"""
if not self._cap.isOpened():
return False
# Grab the next frame if possible.
if advance:
has_grabbed = self._cap.grab()
# If we failed to grab the frame, retry a few times if required.
if not has_grabbed:
if self.duration > 0 and self.position < (self.duration - 1):
for _ in range(self._max_decode_attempts):
has_grabbed = self._cap.grab()
if has_grabbed:
break
# Report previous failure in debug mode.
if has_grabbed:
self._decode_failures += 1
logger.debug('Frame failed to decode.')
if not self._warning_displayed and self._decode_failures > 1:
logger.warning('Failed to decode some frames, results may be inaccurate.')
# We didn't manage to grab a frame even after retrying, so just return.
if not has_grabbed:
return False
self._has_grabbed = True
# Need to make sure we actually grabbed a frame before calling retrieve.
if decode and self._has_grabbed:
_, frame = self._cap.retrieve()
return frame
return self._has_grabbed
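# --- Editor's sketch (illustrative addition, not part of the original PySceneDetect source) ---
# With decode=False, read() only advances the stream (grab without retrieve), which is
# cheaper than decoding when only positions are needed. 'example.mp4' is a placeholder path.
stream = VideoStreamCv2('example.mp4')
advanced = 0
while stream.read(decode=False):   # returns a bool instead of a frame when decode=False
    advanced += 1
print(advanced, stream.frame_number)
# --- end of sketch ---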
#
# Private Methods
#
def _open_capture(self, framerate: Optional[float] = None):
"""Opens capture referenced by this object and resets internal state."""
if self._is_device and self._path_or_device < 0:
raise ValueError('Invalid/negative device ID specified.')
input_is_video_file = not self._is_device and not any(
identifier in self._path_or_device for identifier in NON_VIDEO_FILE_INPUT_IDENTIFIERS)
# We don't have a way of querying why opening a video fails (errors are logged at least),
# so provide a better error message if we try to open a file that doesn't exist.
if input_is_video_file:
if not os.path.exists(self._path_or_device):
raise OSError('Video file not found.')
cap = cv2.VideoCapture(self._path_or_device)
if not cap.isOpened():
raise VideoOpenFailure(
'Ensure file is valid video and system dependencies are up to date.\n')
# Display an error if the video codec type seems unsupported (#86) as this indicates
# potential video corruption, or may explain missing frames. We only perform this check
# for video files on-disk (skipped for devices, image sequences, streams, etc...).
codec_unsupported: bool = (int(abs(cap.get(cv2.CAP_PROP_FOURCC))) == 0)
if codec_unsupported and input_is_video_file:
logger.error('Video codec detection failed. If output is incorrect:\n'
' - Re-encode the input video with ffmpeg\n'
' - Update OpenCV (pip install --upgrade opencv-python)\n'
' - Use the PyAV backend (--backend pyav)\n'
'For details, see https://github.com/Breakthrough/PySceneDetect/issues/86')
# Ensure the framerate is correct to avoid potential divide by zero errors. This can be
# addressed in the PyAV backend if required since it supports integer timebases.
assert framerate is None or framerate > MAX_FPS_DELTA, "Framerate must be validated if set!"
if framerate is None:
framerate = cap.get(cv2.CAP_PROP_FPS)
if framerate < MAX_FPS_DELTA: | raise FrameRateUnavailable() | 6 | 2023-10-25 02:50:01+00:00 | 12k |
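# --- Editor's note (hedged sketch; not part of the dataset row above) ---
# The ground-truth completion for this row is `raise FrameRateUnavailable()`: it finishes the
# fallback in _open_capture where a reported FPS below MAX_FPS_DELTA means no usable framerate
# could be detected. A condensed sketch of that validation; the threshold below is a
# placeholder, the real MAX_FPS_DELTA constant is defined in video_stream.py.
MAX_FPS_DELTA_SKETCH = 1.0 / 1000  # placeholder value
def _validated_framerate(reported_fps, override=None):
    framerate = override if override is not None else reported_fps
    if framerate < MAX_FPS_DELTA_SKETCH:
        raise FrameRateUnavailable()
    return framerate
# --- end of note ---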
Genesis-Embodied-AI/RoboGen | manipulation/sim.py | [
{
"identifier": "Panda",
"path": "manipulation/panda.py",
"snippet": "class Panda(Robot):\n def __init__(self, controllable_joints='right', slider=True, floating=False):\n self.slider = slider\n self.floating = floating\n if not floating:\n if not slider:\n right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints\n right_end_effector = 11 # Used to get the pose of the end effector\n right_gripper_indices = [9, 10] # Gripper actuated joints\n else:\n right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]\n right_end_effector = 15 # Used to get the pose of the end effector\n right_gripper_indices = [13, 14] # Gripper actuated joints\n \n else:\n right_arm_joint_indices = []\n right_end_effector = -1\n right_gripper_indices = [0, 1]\n\n super(Panda, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)\n\n def init(self, directory, id, np_random, fixed_base=False, use_suction=True):\n self.body = p.loadURDF(os.path.join(directory, 'franka_mobile', 'panda_suction_slider_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)\n\n for i in range(p.getNumJoints(self.body, physicsClientId=id)):\n print(p.getJointInfo(self.body, i, physicsClientId=id))\n link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')\n print(\"link_name: \", link_name)\n\n super(Panda, self).init(self.body, id, np_random)"
},
{
"identifier": "UR5",
"path": "manipulation/ur5.py",
"snippet": "class UR5(Robot):\n def __init__(self, controllable_joints='right', slider=True, floating=False):\n self.slider = slider\n self.floating = floating\n if not floating:\n if not slider:\n right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints\n right_end_effector = 11 # Used to get the pose of the end effector\n right_gripper_indices = [9, 10] # Gripper actuated joints\n else:\n right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]\n right_end_effector = 21 # Used to get the pose of the end effector\n right_gripper_indices = [21, 19] # Gripper actuated joints\n \n else:\n right_arm_joint_indices = []\n right_end_effector = -1\n right_gripper_indices = [0, 1]\n\n super(UR5, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)\n\n def init(self, directory, id, np_random, fixed_base=False, use_suction=True):\n self.body = p.loadURDF(os.path.join(directory, 'ur5', 'ur5_robotiq85_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)\n\n for i in range(p.getNumJoints(self.body, physicsClientId=id)):\n print(p.getJointInfo(self.body, i, physicsClientId=id))\n link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')\n print(\"link_name: \", link_name)\n\n all_joint_num = p.getNumJoints(self.body)\n all_joint_idx = list(range(all_joint_num))\n joint_idx = [j for j in all_joint_idx if self._is_not_fixed(j)]\n self.right_arm_joint_indices = joint_idx\n self.controllable_joint_indices = self.right_arm_joint_indices\n\n super(UR5, self).init(self.body, id, np_random)"
},
{
"identifier": "Sawyer",
"path": "manipulation/sawyer.py",
"snippet": "class Sawyer(Robot):\n def __init__(self, controllable_joints='right', slider=True, floating=False):\n self.slider = slider\n self.floating = floating\n if not floating:\n if not slider:\n right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints\n right_end_effector = 11 # Used to get the pose of the end effector\n right_gripper_indices = [9, 10] # Gripper actuated joints\n else:\n right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]\n right_end_effector = 26 # Used to get the pose of the end effector\n right_gripper_indices = [25, 23] # Gripper actuated joints\n \n else:\n right_arm_joint_indices = []\n right_end_effector = -1\n right_gripper_indices = [0, 1]\n\n super(Sawyer, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)\n\n def init(self, directory, id, np_random, fixed_base=False, use_suction=True):\n self.body = p.loadURDF(os.path.join(directory, 'sawyer', 'sawyer_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)\n\n for i in range(p.getNumJoints(self.body, physicsClientId=id)):\n print(p.getJointInfo(self.body, i, physicsClientId=id))\n link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')\n print(\"link_name: \", link_name)\n\n all_joint_num = p.getNumJoints(self.body)\n all_joint_idx = list(range(all_joint_num))\n joint_idx = [j for j in all_joint_idx if self._is_not_fixed(j)]\n self.right_arm_joint_indices = joint_idx\n self.controllable_joint_indices = self.right_arm_joint_indices\n print(\"joint_idx: \", joint_idx)\n\n super(Sawyer, self).init(self.body, id, np_random)"
},
{
"identifier": "parse_config",
"path": "manipulation/utils.py",
"snippet": "def parse_config(config, use_bard=True, obj_id=None, use_gpt_size=True, use_vhacd=True):\n urdf_paths = []\n urdf_sizes = []\n urdf_locations = []\n urdf_names = []\n urdf_types = []\n urdf_on_tables = []\n urdf_movables = []\n use_table = False\n articulated_joint_angles = {}\n spatial_relationships = []\n distractor_config_path = None\n\n for obj in config:\n print(obj)\n\n if \"use_table\" in obj.keys():\n use_table = obj['use_table']\n\n if \"set_joint_angle_object_name\" in obj.keys():\n new_obj = copy.deepcopy(obj)\n new_obj.pop('set_joint_angle_object_name')\n articulated_joint_angles[obj['set_joint_angle_object_name']] = new_obj\n\n if \"spatial_relationships\" in obj.keys():\n spatial_relationships = obj['spatial_relationships']\n\n if 'task_name' in obj.keys() or 'task_description' in obj.keys():\n continue\n\n if \"distractor_config_path\" in obj.keys():\n distractor_config_path = obj['distractor_config_path']\n\n if \"type\" not in obj.keys():\n continue\n \n if obj['type'] == 'mesh':\n if 'uid' not in obj.keys():\n continue\n if obj_id is None:\n uid = obj['uid'][np.random.randint(len(obj['uid']))]\n else:\n uid = obj['uid'][obj_id]\n \n urdf_file_path = osp.join(\"objaverse_utils/data/obj\", \"{}\".format(uid), \"material.urdf\")\n if not os.path.exists(urdf_file_path):\n down_load_single_object(name=obj['lang'], uids=[uid])\n \n new_urdf_file_path = urdf_file_path.replace(\"material.urdf\", \"material_non_vhacd.urdf\")\n new_urdf_lines = []\n with open(urdf_file_path, 'r') as f:\n urdf_lines = f.readlines()\n for line in urdf_lines:\n if 'vhacd' in line:\n new_line = line.replace(\"_vhacd\", \"\")\n new_urdf_lines.append(new_line)\n else:\n new_urdf_lines.append(line)\n with open(new_urdf_file_path, 'w') as f:\n f.writelines(new_urdf_lines)\n urdf_file_path = new_urdf_file_path\n print(\"object {} choosing uid {} urdf_path {}\".format(obj['lang'], uid, urdf_file_path))\n\n urdf_paths.append(urdf_file_path)\n urdf_types.append('mesh')\n urdf_movables.append(True) # all mesh objects are movable\n \n elif obj['type'] == 'urdf':\n try:\n category = obj['lang']\n possible_obj_path = partnet_mobility_dict[category]\n except:\n category = obj['name']\n if category == 'Computer display':\n category = 'Display'\n possible_obj_path = partnet_mobility_dict[category]\n \n if 'reward_asset_path' not in obj.keys():\n obj_path = np.random.choice(possible_obj_path)\n if category == 'Toaster':\n obj_path = str(103486)\n if category == 'Microwave':\n obj_path = str(7310)\n if category == \"Oven\":\n obj_path = str(101808)\n if category == 'Refrigerator':\n obj_path = str(10638)\n else:\n obj_path = obj['reward_asset_path']\n urdf_file_path = osp.join(\"data/dataset\", obj_path, \"mobility.urdf\")\n if use_vhacd:\n new_urdf_file_path = urdf_file_path.replace(\"mobility.urdf\", \"mobility_vhacd.urdf\")\n if not osp.exists(new_urdf_file_path):\n new_urdf_file_path = preprocess_urdf(urdf_file_path)\n urdf_paths.append(new_urdf_file_path)\n else:\n urdf_paths.append(urdf_file_path)\n\n urdf_types.append('urdf')\n urdf_movables.append(obj.get('movable', False)) # by default, urdf objects are not movable, unless specified\n\n urdf_sizes.append(obj['size'])\n urdf_locations.append(parse_center(obj['center']))\n urdf_names.append(obj['name'])\n urdf_on_tables.append(obj.get('on_table', False))\n\n return urdf_paths, urdf_sizes, urdf_locations, urdf_names, urdf_types, urdf_on_tables, use_table, \\\n articulated_joint_angles, spatial_relationships, distractor_config_path, urdf_movables"
},
{
"identifier": "load_env",
"path": "manipulation/utils.py",
"snippet": "def load_env(env, load_path=None, state=None):\n\n if load_path is not None:\n with open(load_path, 'rb') as f:\n state = pickle.load(f)\n \n ### set env to stored object position and orientation\n for obj_name, obj_id in env.urdf_ids.items():\n p.resetBasePositionAndOrientation(obj_id, state['object_base_position'][obj_name], state['object_base_orientation'][obj_name], physicsClientId=env.id)\n\n ### set env to stored object joint angles\n for obj_name, obj_id in env.urdf_ids.items():\n num_links = p.getNumJoints(obj_id, physicsClientId=env.id)\n for link_idx in range(0, num_links):\n joint_angle = state['object_joint_angle_dicts'][obj_name][link_idx]\n p.resetJointState(obj_id, link_idx, joint_angle, physicsClientId=env.id)\n\n ### recover suction\n env.activated = state['activated']\n if state['activated']:\n env.suction_obj_id = state['suction_object_id']\n env.suction_contact_link = state['suction_contact_link']\n env.suction_to_obj_pose = state['suction_to_obj_pose']\n env.create_suction_constraint(env.suction_obj_id, env.suction_contact_link, env.suction_to_obj_pose)\n\n if \"urdf_paths\" in state:\n env.urdf_paths = state[\"urdf_paths\"]\n\n if \"object_sizes\" in state:\n env.simulator_sizes = state[\"object_sizes\"]\n\n if \"robot_name\" in state:\n env.robot_name = state[\"robot_name\"]\n\n if \"table_path\" in state and env.use_table:\n env.table_path = state[\"table_path\"]\n\n return state"
},
{
"identifier": "download_and_parse_objavarse_obj_from_yaml_config",
"path": "manipulation/utils.py",
"snippet": "def download_and_parse_objavarse_obj_from_yaml_config(config_path, candidate_num=10, vhacd=True):\n\n config = None\n while config is None:\n with open(config_path, 'r') as file:\n config = yaml.safe_load(file)\n\n task_name = None\n task_description = None\n for obj in config:\n if 'task_name' in obj.keys():\n task_name = obj['task_name']\n task_description = obj['task_description']\n break\n\n for obj in config:\n if 'type' in obj.keys() and obj['type'] == 'mesh' and 'uid' not in obj.keys():\n print(\"{} trying to download object: {} {}\".format(\"=\" * 20, obj['lang'], \"=\" * 20))\n success = down_load_single_object(obj[\"lang\"], candidate_num=candidate_num, vhacd=vhacd, \n task_name=task_name, task_description=task_description)\n if not success:\n print(\"failed to find suitable object to download {} quit building this task\".format(obj[\"lang\"]))\n return False\n obj['uid'] = text_to_uid_dict[obj[\"lang\"]]\n obj['all_uid'] = text_to_uid_dict[obj[\"lang\"] + \"_all\"]\n\n with open(config_path, 'w') as f:\n yaml.dump(config, f, indent=4)\n\n return True"
},
{
"identifier": "get_joint_id_from_name",
"path": "manipulation/gpt_reward_api.py",
"snippet": "def get_joint_id_from_name(simulator, object_name, joint_name):\n object_id = simulator.urdf_ids[object_name]\n num_joints = p.getNumJoints(object_id, physicsClientId=simulator.id)\n joint_index = None\n for i in range(num_joints):\n joint_info = p.getJointInfo(object_id, i, physicsClientId=simulator.id)\n if joint_info[1].decode(\"utf-8\") == joint_name:\n joint_index = i\n break\n\n return joint_index"
},
{
"identifier": "get_link_id_from_name",
"path": "manipulation/gpt_reward_api.py",
"snippet": "def get_link_id_from_name(simulator, object_name, link_name):\n object_id = simulator.urdf_ids[object_name]\n num_joints = p.getNumJoints(object_id, physicsClientId=simulator.id)\n joint_index = None\n for i in range(num_joints):\n joint_info = p.getJointInfo(object_id, i, physicsClientId=simulator.id)\n if joint_info[12].decode(\"utf-8\") == link_name:\n joint_index = i\n break\n\n return joint_index"
}
] | import numpy as np
import pybullet as p
import gym
import pickle
import yaml
import os.path as osp
from gym.utils import seeding
from gym import spaces
from collections import defaultdict
from scipy.spatial.transform import Rotation as R
from manipulation.panda import Panda
from manipulation.ur5 import UR5
from manipulation.sawyer import Sawyer
from manipulation.utils import parse_config, load_env, download_and_parse_objavarse_obj_from_yaml_config
from manipulation.gpt_reward_api import get_joint_id_from_name, get_link_id_from_name
from manipulation.table_utils import table_paths, table_scales, table_poses, table_bbox_scale_down_factors | 7,486 | skip = False
for spatial_relationship in spatial_relationships:
words = spatial_relationship.lower().split(",")
words = [word.strip().lstrip() for word in words]
if name in words and name2 in words:
skip = True
break
if skip: continue
contact_points = p.getClosestPoints(id, id2, 0.01, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
# both are distractors or both are not, push both objects away
if (self.is_distractor[name] and self.is_distractor[name2]) or \
(not self.is_distractor[name] and not self.is_distractor[name2]):
push_directions[id].append(-push_direction)
push_directions[id2].append(push_direction)
# only one is a distractor: push only the distractor away
if self.is_distractor[name] and not self.is_distractor[name2]:
push_directions[id].append(push_direction)
if not self.is_distractor[name] and self.is_distractor[name2]:
push_directions[id2].append(-push_direction)
detected_collision = True
# collisions between robot and objects, only push object away
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table':
continue
contact_points = p.getClosestPoints(self.robot.body, id, 0.05, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
push_directions[id].append(-push_direction)
detected_collision = True
# between table and objects that should not be placed on table
if self.use_table:
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table':
continue
if self.on_tables[name]:
continue
contact_points = p.getClosestPoints(self.robot.body, id, 0.05, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
push_directions[id].append(-push_direction)
detected_collision = True
# move objects
push_distance = 0.1
for id in push_directions:
for direction in push_directions[id]:
pos, orient = p.getBasePositionAndOrientation(id, physicsClientId=self.id)
new_pos = np.array(pos) + push_distance * direction
new_pos = self.clip_within_workspace(robot_base_pos, new_pos, self.on_tables[name])
new_pos[2] = object_height[id]
p.resetBasePositionAndOrientation(id, new_pos, orient, physicsClientId=self.id)
p.stepSimulation(physicsClientId=self.id)
collision = detected_collision
collision_cnt += 1
if collision_cnt > 1000:
break
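# --- Editor's sketch (illustrative addition, not part of the original RoboGen source) ---
# The loop above resolves interpenetrations iteratively: each near-contact contributes a push
# along the contact normal (element 7 of PyBullet's getClosestPoints result), objects are
# displaced 0.1 m per step, and the process repeats until no contact remains or 1000
# iterations pass. A condensed, two-body version of one push step:
def push_apart_once(body_a, body_b, client_id, distance=0.1):
    contacts = p.getClosestPoints(body_a, body_b, 0.01, physicsClientId=client_id)
    if not contacts:
        return False
    normal = np.array(contacts[0][7])   # contact normal on body_b, as used above
    for body, sign in ((body_a, -1.0), (body_b, 1.0)):
        pos, orn = p.getBasePositionAndOrientation(body, physicsClientId=client_id)
        new_pos = np.array(pos) + sign * distance * normal
        p.resetBasePositionAndOrientation(body, new_pos, orn, physicsClientId=client_id)
    return True
# --- end of sketch ---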
def record_initial_joint_and_pose(self):
self.initial_joint_angle = {}
for name in self.urdf_ids:
obj_id = self.urdf_ids[name.lower()]
if name == 'robot' or name == 'plane' or name == "init_table": continue
if self.urdf_types[name.lower()] == 'urdf':
self.initial_joint_angle[name] = {}
num_joints = p.getNumJoints(obj_id, physicsClientId=self.id)
for joint_idx in range(num_joints):
joint_name = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[1].decode("utf-8")
joint_angle = p.getJointState(obj_id, joint_idx, physicsClientId=self.id)[0]
self.initial_joint_angle[name][joint_name] = joint_angle
self.initial_pos = {}
self.initial_orient = {}
for name in self.urdf_ids:
obj_id = self.urdf_ids[name.lower()]
if name == 'robot' or name == 'plane' or name == "init_table": continue
pos, orient = p.getBasePositionAndOrientation(obj_id, physicsClientId=self.id)
self.initial_pos[name] = pos
self.initial_orient[name] = orient
def set_to_default_joint_angles(self):
for obj_name in self.urdf_ids:
if obj_name == 'robot' or obj_name == 'plane' or obj_name == "init_table": continue
obj_id = self.urdf_ids[obj_name]
num_joints = p.getNumJoints(obj_id, physicsClientId=self.id)
for joint_idx in range(num_joints):
joint_limit_low, joint_limit_high = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[8:10]
if joint_limit_low > joint_limit_high:
joint_limit_low, joint_limit_high = joint_limit_high, joint_limit_low
joint_val = joint_limit_low + 0.06 * (joint_limit_high - joint_limit_low)
p.resetJointState(obj_id, joint_idx, joint_val, physicsClientId=self.id)
def handle_gpt_special_relationships(self, spatial_relationships):
# we support "on" and "in" for now, but this can be extended to more relationships
for spatial_relationship in spatial_relationships:
words = spatial_relationship.lower().split(",")
words = [word.strip().lstrip() for word in words]
if words[0] == "on":
obj_a = words[1]
obj_b = words[2]
if len(words) == 4:
obj_b_link = words[3]
|
class SimpleEnv(gym.Env):
def __init__(self,
dt=0.01,
config_path=None,
gui=False,
frameskip=2,
horizon=120,
restore_state_file=None,
rotation_mode='delta-axis-angle-local',
translation_mode='delta-translation',
max_rotation=np.deg2rad(5),
max_translation=0.15,
use_suction=True, # whether to use a suction gripper
object_candidate_num=6, # how many candidate objects to sample from objaverse
vhacd=False, # if to perform vhacd on the object for better collision detection for pybullet
randomize=0, # if to randomize the scene
obj_id=0, # which object to choose to use from the candidates
):
super().__init__()
# Task
self.config_path = config_path
self.restore_state_file = restore_state_file
self.frameskip = frameskip
self.horizon = horizon
self.gui = gui
self.object_candidate_num = object_candidate_num
self.solution_path = None
self.success = False # not really used, kept for now
self.primitive_save_path = None # to be used for saving the primitives execution results
self.randomize = randomize
self.obj_id = obj_id # which object to choose to use from the candidates
# physics
self.gravity = -9.81
self.contact_constraint = None
self.vhacd = vhacd
# action space
self.use_suction = use_suction
self.rotation_mode = rotation_mode
self.translation_mode = translation_mode
self.max_rotation_angle = max_rotation
self.max_translation = max_translation
self.suction_to_obj_pose = 0
self.suction_contact_link = None
self.suction_obj_id = None
self.activated = 0
if self.gui:
try:
self.id = p.connect(p.GUI)
except:
self.id = p.connect(p.DIRECT)
else:
self.id = p.connect(p.DIRECT)
self.asset_dir = osp.join(osp.dirname(osp.realpath(__file__)), "assets/")
hz=int(1/dt)
p.setTimeStep(1.0 / hz, physicsClientId=self.id)
self.seed()
self.set_scene()
self.setup_camera_rpy()
self.scene_lower, self.scene_upper = self.get_scene_bounds()
self.scene_center = (self.scene_lower + self.scene_upper) / 2
self.scene_range = (self.scene_upper - self.scene_lower) / 2
self.grasp_action_mag = 0.06 if not self.use_suction else 1
self.action_low = np.array([-1, -1, -1, -1, -1, -1, -1])
self.action_high = np.array([1, 1, 1, 1, 1, 1, self.grasp_action_mag])
self.action_space = spaces.Box(low=self.action_low, high=self.action_high, dtype=np.float32)
self.base_action_space = spaces.Box(low=self.action_low, high=self.action_high, dtype=np.float32)
self.num_objects = len(self.urdf_ids) - 2 # exclude plane, robot
distractor_object_num = np.sum(list(self.is_distractor.values()))
self.num_objects -= distractor_object_num
### For RL policy learning, observation space includes:
# 1. object positions and orientations (6 * num_objects)
# 2. object min and max bounding box (6 * num_objects)
# 3. articulated object joint angles (num_objects * num_joints)
# 4. articulated object link position and orientation (num_objects * num_joints * 6)
# 5. robot base position (xy)
# 6. robot end-effector position and orientation (6)
# 7. gripper suction activated/deactivate or gripper joint angle (if not using suction gripper) (1)
num_obs = self.num_objects * 12 # obs 1 and 2
for name in self.urdf_types:
if self.urdf_types[name] == 'urdf' and not self.is_distractor[name]: # obs 3 and 4
num_joints = p.getNumJoints(self.urdf_ids[name], physicsClientId=self.id)
num_obs += num_joints
num_obs += 6 * num_joints
num_obs += 2 + 6 + 1 # obs 5 6 7
self.base_num_obs = num_obs
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(num_obs, ), dtype=np.float32)
self.base_observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.base_num_obs, ), dtype=np.float32)
self.detected_position = {} # not used for now, keep it
def normalize_position(self, pos):
if self.translation_mode == 'normalized-direct-translation':
return (pos - self.scene_center) / self.scene_range
else:
return pos
def seed(self, seed=None):
self.np_random, _ = seeding.np_random()
def get_aabb(self, id):
num_joints = p.getNumJoints(id, physicsClientId=self.id)
min_aabbs, max_aabbs = [], []
for link_idx in range(-1, num_joints):
min_aabb, max_aabb = p.getAABB(id, link_idx, physicsClientId=self.id)
min_aabbs.append(list(min_aabb))
max_aabbs.append(list(max_aabb))
min_aabb = np.min(np.concatenate(min_aabbs, axis=0).reshape(-1, 3), axis=0)
max_aabb = np.max(np.concatenate(max_aabbs, axis=0).reshape(-1, 3), axis=0)
return min_aabb, max_aabb
def get_aabb_link(self, id, link_id):
min_aabb, max_aabb = p.getAABB(id, link_id, physicsClientId=self.id)
return np.array(min_aabb), np.array(max_aabb)
def get_scene_bounds(self):
min_aabbs = []
max_aabbs = []
for name, id in self.urdf_ids.items():
if name == 'plane': continue
min_aabb, max_aabb = self.get_aabb(id)
min_aabbs.append(min_aabb)
max_aabbs.append(max_aabb)
min_aabb = np.min(np.stack(min_aabbs, axis=0).reshape(-1, 3), axis=0)
max_aabb = np.max(np.stack(max_aabbs, axis=0).reshape(-1, 3), axis=0)
range = max_aabb - min_aabb
return min_aabb - 0.5 * range, max_aabb + 0.5 * range
def clip_within_workspace(self, robot_pos, ori_pos, on_table):
pos = ori_pos.copy()
if not on_table:
# If objects are too close to the robot, push them away
x_near_low, x_near_high = robot_pos[0] - 0.3, robot_pos[0] + 0.3
y_near_low, y_near_high = robot_pos[1] - 0.3, robot_pos[1] + 0.3
if pos[0] > x_near_low and pos[0] < x_near_high:
pos[0] = x_near_low if pos[0] < robot_pos[0] else x_near_high
if pos[1] > y_near_low and pos[1] < y_near_high:
pos[1] = y_near_low if pos[1] < robot_pos[1] else y_near_high
return pos
else:
# Object is on table, should be within table's bounding box
new_pos = pos.copy()
new_pos[:2] = np.clip(new_pos[:2], self.table_bbox_min[:2], self.table_bbox_max[:2])
return new_pos
def get_robot_base_pos(self):
robot_base_pos = [1, 1, 0.28]
return robot_base_pos
def get_robot_init_joint_angles(self):
init_joint_angles = [0 for _ in range(len(self.robot.right_arm_joint_indices))]
if self.robot_name == 'panda':
init_joint_angles = [0, -1.10916842e-04, 7.33823451e-05, -5.47701370e-01, -5.94950533e-01,
2.62857916e+00, -4.85316284e-01, 1.96042022e+00, 2.15271531e+00,
-7.35304443e-01]
return init_joint_angles
def set_scene(
self,
):
### simulation preparation
p.resetSimulation(physicsClientId=self.id)
if self.gui:
p.resetDebugVisualizerCamera(cameraDistance=1.75, cameraYaw=-25, cameraPitch=-45, cameraTargetPosition=[-0.2, 0, 0.4], physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0, physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0, physicsClientId=self.id)
p.setRealTimeSimulation(0, physicsClientId=self.id)
p.setGravity(0, 0, self.gravity, physicsClientId=self.id)
### load restore state
restore_state = None
if self.restore_state_file is not None:
with open(self.restore_state_file, 'rb') as f:
restore_state = pickle.load(f)
### load plane
planeId = p.loadURDF(osp.join(self.asset_dir, "plane", "plane.urdf"), physicsClientId=self.id)
### create and load a robot
robot_base_pos = self.load_robot(restore_state)
### load and parse task config (including semantically meaningful distractor objects)
self.urdf_ids = {
"robot": self.robot.body,
"plane": planeId,
}
self.urdf_paths = {}
self.urdf_types = {}
self.init_positions = {}
self.on_tables = {}
self.simulator_sizes = {}
self.is_distractor = {
"robot": 0,
"plane": 0,
}
urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables, \
use_table, articulated_init_joint_angles, spatial_relationships = self.load_and_parse_config(restore_state)
### handle the case if there is a table
self.load_table(use_table, restore_state)
### load each object from the task config
self.load_object(urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables)
### adjusting object positions
### place the lowest point on the object to be the height where GPT specifies
object_height = self.adjust_object_positions(robot_base_pos)
### resolve collisions between objects
self.resolve_collision(robot_base_pos, object_height, spatial_relationships)
### handle any special relationships outputted by GPT
self.handle_gpt_special_relationships(spatial_relationships)
### set all object's joint angles to the lower joint limit
self.set_to_default_joint_angles()
### overwrite joint angles specified by GPT
self.handle_gpt_joint_angle(articulated_init_joint_angles)
### record initial joint angles and positions
self.record_initial_joint_and_pose()
### stabilize the scene
for _ in range(500):
p.stepSimulation(physicsClientId=self.id)
### restore to a state if provided
if self.restore_state_file is not None:
load_env(self, self.restore_state_file)
### Enable debug rendering
if self.gui:
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
self.init_state = p.saveState(physicsClientId=self.id)
def load_robot(self, restore_state):
robot_classes = {
"panda": Panda,
"sawyer": Sawyer,
"ur5": UR5,
}
robot_names = list(robot_classes.keys())
self.robot_name = robot_names[np.random.randint(len(robot_names))]
if restore_state is not None and "robot_name" in restore_state:
self.robot_name = restore_state['robot_name']
self.robot_class = robot_classes[self.robot_name]
# Create robot
self.robot = self.robot_class()
self.robot.init(self.asset_dir, self.id, self.np_random, fixed_base=True, use_suction=self.use_suction)
self.agents = [self.robot]
self.suction_id = self.robot.right_gripper_indices[0]
# Update robot motor gains
self.robot.motor_gains = 0.05
self.robot.motor_forces = 100.0
# Set robot base position & orientation, and joint angles
robot_base_pos = self.get_robot_base_pos()
robot_base_orient = [0, 0, 0, 1]
self.robot_base_orient = robot_base_orient
self.robot.set_base_pos_orient(robot_base_pos, robot_base_orient)
init_joint_angles = self.get_robot_init_joint_angles()
self.robot.set_joint_angles(self.robot.right_arm_joint_indices, init_joint_angles)
return robot_base_pos
def load_and_parse_config(self, restore_state):
### select and download objects from objaverse
res = download_and_parse_objavarse_obj_from_yaml_config(self.config_path, candidate_num=self.object_candidate_num, vhacd=self.vhacd)
if not res:
print("=" * 20)
print("some objects cannot be found in objaverse, task_build failed, now exit ...")
print("=" * 20)
exit()
self.config = None
while self.config is None:
with open(self.config_path, 'r') as file:
self.config = yaml.safe_load(file)
for obj in self.config:
if "solution_path" in obj:
self.solution_path = obj["solution_path"]
break
### parse config
urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, use_table, \
articulated_init_joint_angles, spatial_relationships, distractor_config_path, urdf_movables = parse_config(self.config,
use_bard=True, obj_id=self.obj_id)
if not use_table:
urdf_on_table = [False for _ in urdf_on_table]
urdf_names = [x.lower() for x in urdf_names]
for name in urdf_names:
self.is_distractor[name] = 0
### parse distractor object config (semantically meaningful objects that are related but not used for the task)
if distractor_config_path is not None:
self.distractor_config_path = distractor_config_path
res = download_and_parse_objavarse_obj_from_yaml_config(distractor_config_path, candidate_num=self.object_candidate_num, vhacd=self.vhacd)
with open(distractor_config_path, 'r') as f:
self.distractor_config = yaml.safe_load(f)
distractor_urdf_paths, distractor_urdf_sizes, distractor_urdf_positions, distractor_urdf_names, distractor_urdf_types, \
distractor_urdf_on_table, _, _, _, _, _ = \
parse_config(self.distractor_config, use_bard=True, obj_id=self.obj_id, use_vhacd=False)
distractor_urdf_names = [x.lower() for x in distractor_urdf_names]
if not use_table:
distractor_urdf_on_table = [False for _ in distractor_urdf_on_table]
for name in distractor_urdf_names:
self.is_distractor[name] = 1
distractor_movables = [True for _ in distractor_urdf_names]
urdf_paths += distractor_urdf_paths
urdf_sizes += distractor_urdf_sizes
urdf_positions += distractor_urdf_positions
urdf_names += distractor_urdf_names
urdf_types += distractor_urdf_types
urdf_on_table += distractor_urdf_on_table
urdf_movables += distractor_movables
if restore_state is not None:
if "urdf_paths" in restore_state:
self.urdf_paths = restore_state['urdf_paths']
urdf_paths = [self.urdf_paths[name] for name in urdf_names]
if "object_sizes" in restore_state:
self.simulator_sizes = restore_state['object_sizes']
urdf_sizes = [self.simulator_sizes[name] for name in urdf_names]
return urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables, \
use_table, articulated_init_joint_angles, spatial_relationships
def load_table(self, use_table, restore_state):
self.use_table = use_table
if use_table:
self.table_path = table_paths[np.random.randint(len(table_paths))]
if restore_state is not None:
self.table_path = restore_state['table_path']
table_scale = table_scales[self.table_path]
table_pos = table_poses[self.table_path]
table_orientation = [np.pi/2, 0, 0]
self.table = p.loadURDF(osp.join(self.asset_dir, self.table_path, "material.urdf"), physicsClientId=self.id, useFixedBase=True,
globalScaling=table_scale)
if not self.randomize:
random_orientation = p.getQuaternionFromEuler(table_orientation, physicsClientId=self.id)
else:
random_orientations = [0, np.pi / 2, np.pi, np.pi * 3 / 2]
random_orientation = p.getQuaternionFromEuler([np.pi/2, 0, random_orientations[np.random.randint(4)]], physicsClientId=self.id)
p.resetBasePositionAndOrientation(self.table, table_pos, random_orientation, physicsClientId=self.id)
self.table_bbox_min, self.table_bbox_max = self.get_aabb(self.table)
table_range = self.table_bbox_max - self.table_bbox_min
self.table_bbox_min[:2] += table_range[:2] * table_bbox_scale_down_factors[self.table_path]
self.table_bbox_max[:2] -= table_range[:2] * table_bbox_scale_down_factors[self.table_path]
self.table_height = self.table_bbox_max[2]
p.addUserDebugLine([*self.table_bbox_min[:2], self.table_height], self.table_bbox_max, [1, 0, 0], lineWidth=10, lifeTime=0, physicsClientId=self.id)
self.simulator_sizes["init_table"] = table_scale
self.urdf_ids["init_table"] = self.table
self.is_distractor['init_table'] = 0
def load_object(self, urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables):
for path, size, pos, name, type, on_table, moveable in zip(urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables):
name = name.lower()
# by default, all objects movable, except the urdf files
use_fixed_base = (type == 'urdf' and not self.is_distractor[name])
if type == 'urdf' and moveable: # if gpt specified the object is movable, then it is movable
use_fixed_base = False
size = min(size, 1.2)
size = max(size, 0.1) # if the object is too small, current gripper cannot really manipulate it.
x_orient = np.pi/2 if type == 'mesh' else 0 # handle different coordinate axis by objaverse and partnet-mobility
if self.randomize or self.is_distractor[name]:
orientation = p.getQuaternionFromEuler([x_orient, 0, self.np_random.uniform(-np.pi/3, np.pi/3)], physicsClientId=self.id)
else:
orientation = p.getQuaternionFromEuler([x_orient, 0, 0], physicsClientId=self.id)
if not on_table:
load_pos = pos
else: # change to be table coordinate
table_xy_range = self.table_bbox_max[:2] - self.table_bbox_min[:2]
obj_x = self.table_bbox_min[0] + pos[0] * table_xy_range[0]
obj_y = self.table_bbox_min[1] + pos[1] * table_xy_range[1]
obj_z = self.table_height
load_pos = [obj_x, obj_y, obj_z]
id = p.loadURDF(path, basePosition=load_pos, baseOrientation=orientation, physicsClientId=self.id, useFixedBase=use_fixed_base, globalScaling=size)
# scale size
if name in self.simulator_sizes:
p.removeBody(id, physicsClientId=self.id)
saved_size = self.simulator_sizes[name]
id = p.loadURDF(path, basePosition=load_pos, baseOrientation=orientation, physicsClientId=self.id, useFixedBase=use_fixed_base, globalScaling=saved_size)
else:
min_aabb, max_aabb = self.get_aabb(id)
actual_size = np.linalg.norm(max_aabb - min_aabb)
if np.abs(actual_size - size) > 0.05:
p.removeBody(id, physicsClientId=self.id)
id = p.loadURDF(path, basePosition=load_pos, baseOrientation=orientation, physicsClientId=self.id, useFixedBase=use_fixed_base, globalScaling=size ** 2 / actual_size)
self.simulator_sizes[name] = size ** 2 / actual_size
else:
self.simulator_sizes[name] = size
self.urdf_ids[name] = id
self.urdf_paths[name] = path
self.urdf_types[name] = type
self.init_positions[name] = np.array(load_pos)
self.on_tables[name] = on_table
print("Finished loading object: ", name)
def adjust_object_positions(self, robot_base_pos):
object_height = {}
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table': continue
min_aabb, max_aabb = self.get_aabb(id)
min_z = min_aabb[2]
object_height[id] = 2 * self.init_positions[name][2] - min_z
pos, orient = p.getBasePositionAndOrientation(id, physicsClientId=self.id)
new_pos = np.array(pos)
new_pos = self.clip_within_workspace(robot_base_pos, new_pos, self.on_tables[name])
new_pos[2] = object_height[id]
p.resetBasePositionAndOrientation(id, new_pos, orient, physicsClientId=self.id)
self.init_positions[name] = new_pos
return object_height
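# --- Editor's sketch (illustrative addition, not part of the original RoboGen source) ---
# adjust_object_positions lifts each object so its lowest AABB point sits at the height
# requested in the config: the object is loaded with its base at the requested height z0, so
# with current lowest point min_z, resetting the base to 2*z0 - min_z raises the lowest point
# exactly to z0. A one-object version using only the base link's AABB for brevity:
def lift_to_requested_height(body_id, requested_z, client_id):
    min_aabb, _ = p.getAABB(body_id, -1, physicsClientId=client_id)
    pos, orn = p.getBasePositionAndOrientation(body_id, physicsClientId=client_id)
    new_z = pos[2] + (requested_z - min_aabb[2])
    p.resetBasePositionAndOrientation(body_id, [pos[0], pos[1], new_z], orn, physicsClientId=client_id)
# --- end of sketch ---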
def resolve_collision(self, robot_base_pos, object_height, spatial_relationships):
collision = True
collision_cnt = 1
while collision:
if collision_cnt % 50 == 0: # if collision is not resolved every 50 iterations, we randomly reset object's position
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == "init_table": continue
pos = self.init_positions[name]
_, orient = p.getBasePositionAndOrientation(id, physicsClientId=self.id)
new_pos = np.array(pos) + np.random.uniform(-0.2, 0.2, size=3)
new_pos = self.clip_within_workspace(robot_base_pos, new_pos, self.on_tables[name])
new_pos[2] = object_height[id]
p.resetBasePositionAndOrientation(id, new_pos, orient, physicsClientId=self.id)
p.stepSimulation(physicsClientId=self.id)
push_directions = defaultdict(list) # store the push direction for each object
# detect collisions between objects
detected_collision = False
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table': continue
for name2, id2 in self.urdf_ids.items():
if name == name2 or name2 == 'robot' or name2 == 'plane' or name2 == 'init_table': continue
# if gpt specifies obj a and obj b should have some special relationship, then skip collision resolution
skip = False
for spatial_relationship in spatial_relationships:
words = spatial_relationship.lower().split(",")
words = [word.strip().lstrip() for word in words]
if name in words and name2 in words:
skip = True
break
if skip: continue
contact_points = p.getClosestPoints(id, id2, 0.01, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
# both are distractors or both are not, push both objects away
if (self.is_distractor[name] and self.is_distractor[name2]) or \
(not self.is_distractor[name] and not self.is_distractor[name2]):
push_directions[id].append(-push_direction)
push_directions[id2].append(push_direction)
# only one is a distractor: push only the distractor away
if self.is_distractor[name] and not self.is_distractor[name2]:
push_directions[id].append(push_direction)
if not self.is_distractor[name] and self.is_distractor[name2]:
push_directions[id2].append(-push_direction)
detected_collision = True
# collisions between robot and objects, only push object away
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table':
continue
contact_points = p.getClosestPoints(self.robot.body, id, 0.05, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
push_directions[id].append(-push_direction)
detected_collision = True
# between table and objects that should not be placed on table
if self.use_table:
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table':
continue
if self.on_tables[name]:
continue
contact_points = p.getClosestPoints(self.robot.body, id, 0.05, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
push_directions[id].append(-push_direction)
detected_collision = True
# move objects
push_distance = 0.1
for id in push_directions:
for direction in push_directions[id]:
pos, orient = p.getBasePositionAndOrientation(id, physicsClientId=self.id)
new_pos = np.array(pos) + push_distance * direction
new_pos = self.clip_within_workspace(robot_base_pos, new_pos, self.on_tables[name])
new_pos[2] = object_height[id]
p.resetBasePositionAndOrientation(id, new_pos, orient, physicsClientId=self.id)
p.stepSimulation(physicsClientId=self.id)
collision = detected_collision
collision_cnt += 1
if collision_cnt > 1000:
break
def record_initial_joint_and_pose(self):
self.initial_joint_angle = {}
for name in self.urdf_ids:
obj_id = self.urdf_ids[name.lower()]
if name == 'robot' or name == 'plane' or name == "init_table": continue
if self.urdf_types[name.lower()] == 'urdf':
self.initial_joint_angle[name] = {}
num_joints = p.getNumJoints(obj_id, physicsClientId=self.id)
for joint_idx in range(num_joints):
joint_name = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[1].decode("utf-8")
joint_angle = p.getJointState(obj_id, joint_idx, physicsClientId=self.id)[0]
self.initial_joint_angle[name][joint_name] = joint_angle
self.initial_pos = {}
self.initial_orient = {}
for name in self.urdf_ids:
obj_id = self.urdf_ids[name.lower()]
if name == 'robot' or name == 'plane' or name == "init_table": continue
pos, orient = p.getBasePositionAndOrientation(obj_id, physicsClientId=self.id)
self.initial_pos[name] = pos
self.initial_orient[name] = orient
def set_to_default_joint_angles(self):
for obj_name in self.urdf_ids:
if obj_name == 'robot' or obj_name == 'plane' or obj_name == "init_table": continue
obj_id = self.urdf_ids[obj_name]
num_joints = p.getNumJoints(obj_id, physicsClientId=self.id)
for joint_idx in range(num_joints):
joint_limit_low, joint_limit_high = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[8:10]
if joint_limit_low > joint_limit_high:
joint_limit_low, joint_limit_high = joint_limit_high, joint_limit_low
joint_val = joint_limit_low + 0.06 * (joint_limit_high - joint_limit_low)
p.resetJointState(obj_id, joint_idx, joint_val, physicsClientId=self.id)
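# --- Editor's sketch (illustrative addition, not part of the original RoboGen source) ---
# getJointInfo returns the joint's lower/upper limits at indices 8 and 9; the method above
# opens every articulated joint to 6% of its range so parts start slightly away from the
# lower limit. The same rule as a per-joint helper:
def default_joint_value(body_id, joint_idx, client_id, fraction=0.06):
    low, high = p.getJointInfo(body_id, joint_idx, physicsClientId=client_id)[8:10]
    if low > high:              # some URDFs store the limits swapped
        low, high = high, low
    return low + fraction * (high - low)
# --- end of sketch ---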
def handle_gpt_special_relationships(self, spatial_relationships):
# we support "on" and "in" for now, but this can be extended to more relationships
for spatial_relationship in spatial_relationships:
words = spatial_relationship.lower().split(",")
words = [word.strip().lstrip() for word in words]
if words[0] == "on":
obj_a = words[1]
obj_b = words[2]
if len(words) == 4:
obj_b_link = words[3] | obj_b_link_id = get_link_id_from_name(self, obj_b, obj_b_link) | 7 | 2023-10-31 19:44:09+00:00 | 12k |
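# --- Editor's note (hedged sketch; not part of the dataset row above) ---
# The ground-truth completion resolves the named link of obj_b to a PyBullet link index via
# get_link_id_from_name (see the context snippet above), which scans getJointInfo(...)[12]
# for a matching link name. How that id is used next is not shown in this row; the placement
# step below is therefore an assumption, reusing helpers that do exist in SimpleEnv:
def place_on_link(env, obj_a, obj_b, obj_b_link):
    link_id = get_link_id_from_name(env, obj_b, obj_b_link)
    link_min, link_max = env.get_aabb_link(env.urdf_ids[obj_b], link_id)
    a_id = env.urdf_ids[obj_a]
    _, orn = p.getBasePositionAndOrientation(a_id, physicsClientId=env.id)
    target = [(link_min[0] + link_max[0]) / 2, (link_min[1] + link_max[1]) / 2, link_max[2] + 0.05]
    p.resetBasePositionAndOrientation(a_id, target, orn, physicsClientId=env.id)
# --- end of note ---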
KoeAI/LLVC | minimal_rvc/model.py | [
{
"identifier": "SynthesizerTrnMs256NSFSid",
"path": "minimal_rvc/models.py",
"snippet": "class SynthesizerTrnMs256NSFSid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n emb_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n self.emb_channels = emb_channels\n self.sr = sr\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder(\n inter_channels,\n hidden_channels,\n filter_channels,\n emb_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\n \"gin_channels:\",\n gin_channels,\n \"self.spk_embed_dim:\",\n self.spk_embed_dim,\n \"emb_channels:\",\n emb_channels,\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs256NSFSidNono",
"path": "minimal_rvc/models.py",
"snippet": "class SynthesizerTrnMs256NSFSidNono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n emb_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n self.emb_channels = emb_channels\n self.sr = sr\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder(\n inter_channels,\n hidden_channels,\n filter_channels,\n emb_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\n \"gin_channels:\",\n gin_channels,\n \"self.spk_embed_dim:\",\n self.spk_embed_dim,\n \"emb_channels:\",\n emb_channels,\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "VocalConvertPipeline",
"path": "minimal_rvc/pipeline.py",
"snippet": "class VocalConvertPipeline(object):\n def __init__(self, tgt_sr: int, device: Union[str, torch.device], is_half: bool, no_pad: bool = False):\n if isinstance(device, str):\n device = torch.device(device)\n if device.type == \"cuda\":\n vram = torch.cuda.get_device_properties(\n device).total_memory / 1024**3\n else:\n vram = None\n\n if vram is not None and vram <= 4:\n self.x_pad = 1\n self.x_query = 5\n self.x_center = 30\n self.x_max = 32\n elif vram is not None and vram <= 5:\n self.x_pad = 1\n self.x_query = 6\n self.x_center = 38\n self.x_max = 41\n else:\n self.x_pad = 3\n self.x_query = 10\n self.x_center = 60\n self.x_max = 65\n if no_pad:\n self.x_pad = 0\n\n self.sr = 16000 # hubert input sample rate\n self.window = 160 # hubert input window\n self.t_pad = self.sr * self.x_pad # padding time for each utterance\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # query time before and after query point\n self.t_center = self.sr * self.x_center # query cut point position\n self.t_max = self.sr * self.x_max # max time for no query\n self.device = device\n self.is_half = is_half\n\n self.model_rmvpe = RMVPE(\n f\"llvc_models/models/f0/rmvpe.pt\",\n is_half=self.is_half,\n device=self.device,\n )\n\n def get_optimal_torch_device(self, index: int = 0) -> torch.device:\n # Get cuda device\n if torch.cuda.is_available():\n # Very fast\n return torch.device(f\"cuda:{index % torch.cuda.device_count()}\")\n elif torch.backends.mps.is_available():\n return torch.device(\"mps\")\n # Insert an else here to grab \"xla\" devices if available. TO DO later. Requires the torch_xla.core.xla_model library\n # Else wise return the \"cpu\" as a torch device,\n return torch.device(\"cpu\")\n\n def get_f0_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n p_len,\n # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time.\n hop_length=64,\n model=\"full\", # Either use crepe-tiny \"tiny\" or crepe \"full\". Default is full\n ):\n # fixes the F.conv2D exception. 
We needed to convert double to float.\n x = x.astype(np.float32)\n x /= np.quantile(np.abs(x), 0.999)\n torch_device = self.get_optimal_torch_device()\n audio = torch.from_numpy(x).to(torch_device, copy=True)\n audio = torch.unsqueeze(audio, dim=0)\n if audio.ndim == 2 and audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True).detach()\n audio = audio.detach()\n print(\"Initiating prediction with a crepe_hop_length of: \" + str(hop_length))\n pitch: Tensor = torchcrepe.predict(\n audio,\n self.sr,\n hop_length,\n f0_min,\n f0_max,\n model,\n batch_size=hop_length * 2,\n device=torch_device,\n pad=True\n )\n p_len = p_len or x.shape[0] // hop_length\n # Resize the pitch for final f0\n source = np.array(pitch.squeeze(0).cpu().float().numpy())\n source[source < 0.001] = np.nan\n target = np.interp(\n np.arange(0, len(source) * p_len, len(source)) / p_len,\n np.arange(0, len(source)),\n source\n )\n f0 = np.nan_to_num(target)\n return f0 # Resized f0\n\n def get_f0_official_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n model=\"full\",\n ):\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n return f0\n\n def get_f0(\n self,\n x: np.ndarray,\n p_len: int,\n f0_up_key: int,\n f0_method: str,\n f0_relative: bool,\n inp_f0: np.ndarray = None,\n ):\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n\n if f0_method == \"harvest\":\n f0, t = pyworld.harvest(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"dio\":\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, 160, \"full\")\n elif f0_method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(\n x, f0_min, f0_max, \"full\")\n elif f0_method == \"rmvpe\":\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n if f0_relative:\n if f0_method == \"rmvpe\" or f0_method == \"rmvpe_onnx\":\n # this is the average f0 of /test_wavs/2086-149214-0000.wav\n # by calculating f0 relative to this wav, we can ensure\n # consistent output pitch when converting from different speakers\n rel_f0 = 126.21\n else:\n raise ValueError(\"TODO: find rel_f0 for \" + f0_method)\n mean_f0 = np.mean(f0[f0 > 0])\n offset = np.round(12 * np.log2(mean_f0 / rel_f0))\n # print(\"offset: \" + str(offset))\n f0_up_key = f0_up_key - offset\n f0 *= pow(2, f0_up_key / 12)\n tf0 = self.sr // self.window # f0 points per second\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0: self.x_pad *\n tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0: self.x_pad * tf0 + 
len(replace_f0)] = replace_f0[\n :shape\n ]\n\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(int)\n return f0_coarse, f0bak # 1-0\n\n def _convert(\n self,\n model: HubertModel,\n embedding_output_layer: int,\n net_g: SynthesizerTrnMs256NSFSid,\n sid: int,\n audio: np.ndarray,\n pitch: np.ndarray,\n pitchf: np.ndarray,\n index: faiss.IndexIVFFlat,\n big_npy: np.ndarray,\n index_rate: float,\n ):\n feats = torch.from_numpy(audio)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(\n feats.shape).to(self.device).fill_(False)\n\n half_support = (\n self.device.type == \"cuda\"\n and torch.cuda.get_device_capability(self.device)[0] >= 5.3\n )\n is_feats_dim_768 = net_g.emb_channels == 768\n\n if isinstance(model, tuple):\n feats = model[0](\n feats.squeeze(0).squeeze(0).to(self.device),\n return_tensors=\"pt\",\n sampling_rate=16000,\n )\n if self.is_half:\n feats = feats.input_values.to(self.device).half()\n else:\n feats = feats.input_values.to(self.device)\n with torch.no_grad():\n if is_feats_dim_768:\n feats = model[1](feats).last_hidden_state\n else:\n feats = model[1](feats).extract_features\n else:\n inputs = {\n \"source\": feats.half().to(self.device)\n if half_support\n else feats.to(self.device),\n \"padding_mask\": padding_mask.to(self.device),\n \"output_layer\": embedding_output_layer,\n }\n\n if not half_support:\n model = model.float()\n inputs[\"source\"] = inputs[\"source\"].float()\n\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n if is_feats_dim_768:\n feats = logits[0]\n else:\n feats = model.final_proj(logits[0])\n\n if (\n isinstance(index, type(None)) == False\n and isinstance(big_npy, type(None)) == False\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1),\n scale_factor=2).permute(0, 2, 1)\n\n p_len = audio.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch != None and pitchf != None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n if pitch != None and pitchf != None:\n audio1 = (\n (net_g.infer(feats, p_len, pitch,\n pitchf, sid)[0][0, 0] * 32768)\n .data.cpu()\n .float()\n .numpy()\n .astype(np.int16)\n )\n else:\n audio1 = (\n (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)\n .data.cpu()\n .float()\n .numpy()\n .astype(np.int16)\n )\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio1\n\n def __call__(\n self,\n model: HubertModel,\n embedding_output_layer: int,\n net_g: SynthesizerTrnMs256NSFSid,\n sid: int,\n audio: np.ndarray,\n transpose: int,\n f0_method: str,\n file_index: str,\n index_rate: float,\n if_f0: bool,\n 
f0_relative: bool,\n f0_file: str = None,\n ):\n\n index = big_npy = None\n\n bh, ah = signal.butter(N=5, Wn=48, btype=\"high\", fs=16000)\n audio = signal.filtfilt(bh, ah, audio)\n\n audio_pad = np.pad(\n audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += audio_pad[i: i - self.window]\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n np.abs(audio_sum[t - self.t_query: t + self.t_query])\n == np.abs(audio_sum[t - self.t_query: t + self.t_query]).min()\n )[0][0]\n )\n\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n audio_pad, p_len, transpose, f0_method, f0_relative, inp_f0)\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if self.device.type == \"mps\":\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(\n pitchf, device=self.device).unsqueeze(0).float()\n\n audio_opt = []\n\n s = 0\n t = None\n\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[s: t + self.t_pad2 + self.window],\n pitch[:, s //\n self.window: (t + self.t_pad2) // self.window],\n pitchf[:, s //\n self.window: (t + self.t_pad2) // self.window],\n index,\n big_npy,\n index_rate,\n )[self.t_pad_tgt: -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[s: t + self.t_pad2 + self.window],\n None,\n None,\n index,\n big_npy,\n index_rate,\n )[self.t_pad_tgt: -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window:] if t is not None else pitch,\n pitchf[:, t // self.window:] if t is not None else pitchf,\n index,\n big_npy,\n index_rate,\n )[self.t_pad_tgt: -self.t_pad_tgt]\n )\n else:\n result = self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n index,\n big_npy,\n index_rate,\n )\n audio_opt.append(\n result[self.t_pad_tgt: result.shape[-1] - self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt"
},
{
"identifier": "opts",
"path": "minimal_rvc/cmd_opts.py",
"snippet": ""
},
{
"identifier": "ROOT_DIR",
"path": "minimal_rvc/shared.py",
"snippet": "ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nMODELS_DIR = os.path.join(ROOT_DIR, \"llvc_models\", \"models\")\ndef has_mps():"
},
{
"identifier": "load_audio",
"path": "minimal_rvc/utils.py",
"snippet": "def load_audio(file: str, sr):\n try:\n # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26\n # This launches a subprocess to decode audio while down-mixing and resampling as necessary.\n # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.\n file = (\n file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # Prevent small white copy path head and tail with spaces and \" and return\n out, _ = (\n ffmpeg.input(file, threads=0)\n .output(\"-\", format=\"f32le\", acodec=\"pcm_f32le\", ac=1, ar=sr)\n .run(cmd=[\"ffmpeg\", \"-nostdin\"], capture_stdout=True, capture_stderr=True)\n )\n except Exception as e:\n raise RuntimeError(f\"Failed to load audio: {e}\")\n\n return np.frombuffer(out, np.float32).flatten()"
}
] | import os
import re
import torch
from typing import *
from fairseq import checkpoint_utils
from fairseq.models.hubert.hubert import HubertModel
from pydub import AudioSegment
from .models import (SynthesizerTrnMs256NSFSid, SynthesizerTrnMs256NSFSidNono)
from .pipeline import VocalConvertPipeline
from .cmd_opts import opts
from .shared import ROOT_DIR, device, is_half
from .utils import load_audio | 7,758 | # This module is based on code from ddPn08, liujing04, and teftef6220
# https://github.com/ddPn08/rvc-webui
# https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
# https://github.com/teftef6220/Voice_Separation_and_Selection
# These modules are licensed under the MIT License.
AUDIO_OUT_DIR = opts.output_dir or os.path.join(ROOT_DIR, "outputs")
EMBEDDINGS_LIST = {
"hubert-base-japanese": (
"rinna_hubert_base_jp.pt",
"hubert-base-japanese",
"local",
),
"contentvec": ("checkpoint_best_legacy_500.pt", "contentvec", "local"),
}
def update_state_dict(state_dict):
if "params" in state_dict and state_dict["params"] is not None:
return
keys = [
"spec_channels",
"segment_size",
"inter_channels",
"hidden_channels",
"filter_channels",
"n_heads",
"n_layers",
"kernel_size",
"p_dropout",
"resblock",
"resblock_kernel_sizes",
"resblock_dilation_sizes",
"upsample_rates",
"upsample_initial_channel",
"upsample_kernel_sizes",
"spk_embed_dim",
"gin_channels",
"emb_channels",
"sr",
]
state_dict["params"] = {}
n = 0
for i, key in enumerate(keys):
i = i - n
if len(state_dict["config"]) != 19 and key == "emb_channels":
# backward compat.
n += 1
continue
state_dict["params"][key] = state_dict["config"][i]
if not "emb_channels" in state_dict["params"]:
if state_dict.get("version", "v1") == "v1":
state_dict["params"]["emb_channels"] = 256 # for backward compat.
state_dict["embedder_output_layer"] = 9
else:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
state_dict["embedder_output_layer"] = 12
class VoiceConvertModel:
def __init__(self, model_name: str, state_dict: Dict[str, Any]) -> None:
update_state_dict(state_dict)
self.model_name = model_name
self.state_dict = state_dict
self.tgt_sr = state_dict["params"]["sr"]
f0 = state_dict.get("f0", 1)
state_dict["params"]["spk_embed_dim"] = state_dict["weight"][
"emb_g.weight"
].shape[0]
if not "emb_channels" in state_dict["params"]:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
if f0 == 1:
| # This module is based on code from ddPn08, liujing04, and teftef6220
# https://github.com/ddPn08/rvc-webui
# https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
# https://github.com/teftef6220/Voice_Separation_and_Selection
# These modules are licensed under the MIT License.
AUDIO_OUT_DIR = opts.output_dir or os.path.join(ROOT_DIR, "outputs")
EMBEDDINGS_LIST = {
"hubert-base-japanese": (
"rinna_hubert_base_jp.pt",
"hubert-base-japanese",
"local",
),
"contentvec": ("checkpoint_best_legacy_500.pt", "contentvec", "local"),
}
def update_state_dict(state_dict):
if "params" in state_dict and state_dict["params"] is not None:
return
keys = [
"spec_channels",
"segment_size",
"inter_channels",
"hidden_channels",
"filter_channels",
"n_heads",
"n_layers",
"kernel_size",
"p_dropout",
"resblock",
"resblock_kernel_sizes",
"resblock_dilation_sizes",
"upsample_rates",
"upsample_initial_channel",
"upsample_kernel_sizes",
"spk_embed_dim",
"gin_channels",
"emb_channels",
"sr",
]
state_dict["params"] = {}
n = 0
for i, key in enumerate(keys):
i = i - n
if len(state_dict["config"]) != 19 and key == "emb_channels":
# backward compat.
n += 1
continue
state_dict["params"][key] = state_dict["config"][i]
if not "emb_channels" in state_dict["params"]:
if state_dict.get("version", "v1") == "v1":
state_dict["params"]["emb_channels"] = 256 # for backward compat.
state_dict["embedder_output_layer"] = 9
else:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
state_dict["embedder_output_layer"] = 12
class VoiceConvertModel:
def __init__(self, model_name: str, state_dict: Dict[str, Any]) -> None:
update_state_dict(state_dict)
self.model_name = model_name
self.state_dict = state_dict
self.tgt_sr = state_dict["params"]["sr"]
f0 = state_dict.get("f0", 1)
state_dict["params"]["spk_embed_dim"] = state_dict["weight"][
"emb_g.weight"
].shape[0]
if not "emb_channels" in state_dict["params"]:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
if f0 == 1: | self.net_g = SynthesizerTrnMs256NSFSid( | 0 | 2023-10-28 01:58:49+00:00 | 12k |
baaivision/JudgeLM | judgelm/model/model_adapter.py | [
{
"identifier": "GptqConfig",
"path": "judgelm/modules/gptq.py",
"snippet": "class GptqConfig:\n ckpt: str = field(\n default=None,\n metadata={\n \"help\": \"Load quantized model. The path to the local GPTQ checkpoint.\"\n },\n )\n wbits: int = field(default=16, metadata={\"help\": \"#bits to use for quantization\"})\n groupsize: int = field(\n default=-1,\n metadata={\"help\": \"Groupsize to use for quantization; default uses full row.\"},\n )\n act_order: bool = field(\n default=True,\n metadata={\"help\": \"Whether to apply the activation order GPTQ heuristic\"},\n )"
},
{
"identifier": "load_gptq_quantized",
"path": "judgelm/modules/gptq.py",
"snippet": "def load_gptq_quantized(model_name, gptq_config: GptqConfig):\n print(\"Loading GPTQ quantized model...\")\n\n try:\n script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n module_path = os.path.join(script_path, \"../repositories/GPTQ-for-LLaMa\")\n\n sys.path.insert(0, module_path)\n from llama import load_quant\n except ImportError as e:\n print(f\"Error: Failed to load GPTQ-for-LLaMa. {e}\")\n sys.exit(-1)\n\n tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)\n # only `fastest-inference-4bit` branch cares about `act_order`\n if gptq_config.act_order:\n model = load_quant(\n model_name,\n find_gptq_ckpt(gptq_config),\n gptq_config.wbits,\n gptq_config.groupsize,\n act_order=gptq_config.act_order,\n )\n else:\n # other branches\n model = load_quant(\n model_name,\n find_gptq_ckpt(gptq_config),\n gptq_config.wbits,\n gptq_config.groupsize,\n )\n\n return model, tokenizer"
},
{
"identifier": "Conversation",
"path": "judgelm/conversation.py",
"snippet": "class Conversation:\n \"\"\"A class that manages prompt templates and keeps all conversation history.\"\"\"\n\n # The name of this template\n name: str\n # The system prompt\n system: str\n # Two roles\n roles: List[str]\n # All messages. Each item is (role, message).\n messages: List[List[str]]\n # The number of few shot examples\n offset: int\n # Separators\n sep_style: SeparatorStyle\n sep: str = None\n sep2: str = None\n # Stop criteria (the default one is EOS token)\n stop_str: str = None\n # Stops generation if meeting any token in this list\n stop_token_ids: List[int] = None\n\n def get_prompt(self) -> str:\n \"\"\"Get the prompt for generation.\"\"\"\n if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:\n ret = self.system + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \": \" + message + self.sep\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.ADD_COLON_TWO and \"### Response:\" in self.messages[0][-1]:\n seps = [self.sep, self.sep2]\n ret = self.messages[0][-1]\n\n if self.messages[1][-1] != None:\n ret += self.messages[1][-1]\n\n for i, (role, message) in enumerate(self.messages[2:]):\n if message:\n ret += role + \": \" + message + seps[i % 2]\n else:\n ret += role + \":\"\n\n print(\"===========================================\")\n print(\"ret: \", ret)\n print(\"===========================================\")\n return ret\n elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:\n seps = [self.sep, self.sep2]\n ret = self.system + seps[0]\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + \": \" + message + seps[i % 2]\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:\n ret = self.system + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \": \" + message + self.sep\n else:\n ret += role + \": \" # must be end with a space\n return ret\n elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:\n ret = \"\" if self.system == \"\" else self.system + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \"\\n\" + message + self.sep\n else:\n ret += role + \"\\n\"\n return ret\n elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:\n ret = self.system\n for role, message in self.messages:\n if message:\n ret += role + message + self.sep\n else:\n ret += role\n return ret\n elif self.sep_style == SeparatorStyle.NO_COLON_TWO:\n seps = [self.sep, self.sep2]\n ret = self.system\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + message + seps[i % 2]\n else:\n ret += role\n return ret\n elif self.sep_style == SeparatorStyle.RWKV:\n ret = self.system\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += (\n role\n + \": \"\n + message.replace(\"\\r\\n\", \"\\n\").replace(\"\\n\\n\", \"\\n\")\n )\n ret += \"\\n\\n\"\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.LLAMA2:\n seps = [self.sep, self.sep2]\n ret = \"\"\n for i, (role, message) in enumerate(self.messages):\n if message:\n if i == 0:\n ret += self.system + message\n else:\n ret += role + \" \" + message + seps[i % 2]\n else:\n ret += role\n return ret\n elif self.sep_style == SeparatorStyle.CHATGLM:\n # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308\n # source2: 
https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926\n round_add_n = 1 if self.name == \"chatglm2\" else 0\n if self.system:\n ret = self.system + self.sep\n else:\n ret = \"\"\n\n for i, (role, message) in enumerate(self.messages):\n if i % 2 == 0:\n ret += f\"[Round {i//2 + round_add_n}]{self.sep}\"\n\n if message:\n ret += f\"{role}:{message}{self.sep}\"\n else:\n ret += f\"{role}:\"\n return ret\n elif self.sep_style == SeparatorStyle.CHATML:\n ret = \"\" if self.system == \"\" else self.system + self.sep + \"\\n\"\n for role, message in self.messages:\n if message:\n ret += role + \"\\n\" + message + self.sep + \"\\n\"\n else:\n ret += role + \"\\n\"\n return ret\n elif self.sep_style == SeparatorStyle.CHATINTERN:\n # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771\n seps = [self.sep, self.sep2]\n ret = self.system\n for i, (role, message) in enumerate(self.messages):\n if i % 2 == 0:\n ret += \"<s>\"\n if message:\n ret += role + \":\" + message + seps[i % 2] + \"\\n\"\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.DOLLY:\n seps = [self.sep, self.sep2]\n ret = self.system\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + \":\\n\" + message + seps[i % 2]\n if i % 2 == 1:\n ret += \"\\n\\n\"\n else:\n ret += role + \":\\n\"\n return ret\n elif self.sep_style == SeparatorStyle.PHOENIX:\n ret = self.system\n for role, message in self.messages:\n if message:\n ret += role + \": \" + \"<s>\" + message + \"</s>\"\n else:\n ret += role + \": \" + \"<s>\"\n return ret\n elif self.sep_style == SeparatorStyle.ROBIN:\n ret = self.system + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \":\\n\" + message + self.sep\n else:\n ret += role + \":\\n\"\n return ret\n else:\n raise ValueError(f\"Invalid style: {self.sep_style}\")\n\n def append_message(self, role: str, message: str):\n \"\"\"Append a new message.\"\"\"\n self.messages.append([role, message])\n\n def update_last_message(self, message: str):\n \"\"\"Update the last output.\n\n The last message is typically set to be None when constructing the prompt,\n so we need to update it in-place after getting the response from a model.\n \"\"\"\n self.messages[-1][1] = message\n\n def to_gradio_chatbot(self):\n \"\"\"Convert the conversation to gradio chatbot format.\"\"\"\n ret = []\n for i, (role, msg) in enumerate(self.messages[self.offset :]):\n if i % 2 == 0:\n ret.append([msg, None])\n else:\n ret[-1][-1] = msg\n return ret\n\n def to_openai_api_messages(self):\n \"\"\"Convert the conversation to OpenAI chat completion format.\"\"\"\n ret = [{\"role\": \"system\", \"content\": self.system}]\n\n for i, (_, msg) in enumerate(self.messages[self.offset :]):\n if i % 2 == 0:\n ret.append({\"role\": \"user\", \"content\": msg})\n else:\n if msg is not None:\n ret.append({\"role\": \"assistant\", \"content\": msg})\n return ret\n\n def copy(self):\n return Conversation(\n name=self.name,\n system=self.system,\n roles=self.roles,\n messages=[[x, y] for x, y in self.messages],\n offset=self.offset,\n sep_style=self.sep_style,\n sep=self.sep,\n sep2=self.sep2,\n stop_str=self.stop_str,\n stop_token_ids=self.stop_token_ids,\n )\n\n def dict(self):\n return {\n \"template_name\": self.name,\n \"system\": self.system,\n \"roles\": self.roles,\n \"messages\": self.messages,\n \"offset\": self.offset,\n }"
},
{
"identifier": "get_conv_template",
"path": "judgelm/conversation.py",
"snippet": "def get_conv_template(name: str) -> Conversation:\n \"\"\"Get a conversation template.\"\"\"\n return conv_templates[name].copy()"
},
{
"identifier": "load_compress_model",
"path": "judgelm/model/compression.py",
"snippet": "def load_compress_model(model_path, device, torch_dtype, use_fast, revision=\"main\"):\n # partially load model\n # `use_fast=True`` is not supported for some models.\n try:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path, use_fast=use_fast, revision=revision, trust_remote_code=True\n )\n except TypeError:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path, use_fast=False, revision=revision, trust_remote_code=True\n )\n with init_empty_weights():\n # `trust_remote_code` should be set as `True` for both AutoConfig and AutoModel\n config = AutoConfig.from_pretrained(\n model_path,\n low_cpu_mem_usage=True,\n torch_dtype=torch_dtype,\n trust_remote_code=True,\n revision=revision,\n )\n # some models are loaded by AutoModel but not AutoModelForCausalLM,\n # such as chatglm, chatglm2\n try:\n model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)\n except NameError:\n model = AutoModel.from_config(config, trust_remote_code=True)\n linear_weights = get_compressed_list(model)\n if os.path.exists(model_path):\n # `model_path` is a local folder\n base_pattern = os.path.join(model_path, \"pytorch_model*.bin\")\n else:\n # `model_path` is a cached Hugging Face repo\n # We don't necessarily need to download the model' repo again if there is a cache.\n # So check the default huggingface cache first.\n model_path_temp = os.path.join(\n os.getenv(\"HOME\"),\n \".cache/huggingface/hub\",\n \"models--\" + model_path.replace(\"/\", \"--\"),\n \"snapshots/\",\n )\n if os.path.exists(model_path_temp):\n temp_last_dir = os.listdir(model_path_temp)[-1]\n model_path = os.path.join(model_path_temp, temp_last_dir)\n else:\n model_path = snapshot_download(model_path, revision=revision)\n base_pattern = os.path.join(model_path, \"pytorch_model*.bin\")\n\n files = glob.glob(base_pattern)\n\n compressed_state_dict = {}\n\n for filename in tqdm(files):\n tmp_state_dict = torch.load(filename)\n for name in tmp_state_dict:\n if name in linear_weights:\n tensor = tmp_state_dict[name].to(device).data.to(torch_dtype)\n compressed_state_dict[name] = compress(\n tensor, default_compression_config\n )\n else:\n compressed_state_dict[name] = tmp_state_dict[name].to(device)\n tmp_state_dict[name] = None\n tensor = None\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()\n\n for name in model.state_dict():\n if name not in linear_weights:\n set_module_tensor_to_device(\n model, name, device, value=compressed_state_dict[name]\n )\n apply_compressed_weight(model, compressed_state_dict, device)\n\n model.to(device)\n\n return model, tokenizer"
},
{
"identifier": "generate_stream_chatglm",
"path": "judgelm/model/model_chatglm.py",
"snippet": "@torch.inference_mode()\ndef generate_stream_chatglm(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n max_new_tokens = int(params.get(\"max_new_tokens\", 256))\n echo = params.get(\"echo\", True)\n\n inputs = tokenizer([prompt], return_tensors=\"pt\").to(model.device)\n input_echo_len = len(inputs[\"input_ids\"][0])\n\n gen_kwargs = {\n \"max_length\": max_new_tokens + input_echo_len,\n \"do_sample\": True if temperature > 1e-5 else False,\n \"top_p\": top_p,\n \"repetition_penalty\": repetition_penalty,\n \"logits_processor\": [invalid_score_processor],\n }\n if temperature > 1e-5:\n gen_kwargs[\"temperature\"] = temperature\n\n total_len = 0\n for total_ids in model.stream_generate(**inputs, **gen_kwargs):\n total_ids = total_ids.tolist()[0]\n total_len = len(total_ids)\n if echo:\n output_ids = total_ids\n else:\n output_ids = total_ids[input_echo_len:]\n response = tokenizer.decode(output_ids)\n response = process_response(response)\n\n yield {\n \"text\": response,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": total_len - input_echo_len,\n \"total_tokens\": total_len,\n },\n \"finish_reason\": None,\n }\n\n # TODO: ChatGLM stop when it reach max length\n # Only last stream result contains finish_reason, we set finish_reason as stop\n ret = {\n \"text\": response,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": total_len - input_echo_len,\n \"total_tokens\": total_len,\n },\n \"finish_reason\": \"stop\",\n }\n yield ret"
},
{
"identifier": "generate_stream_codet5p",
"path": "judgelm/model/model_codet5p.py",
"snippet": "@torch.inference_mode()\ndef generate_stream_codet5p(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n top_k = int(params.get(\"top_k\", 50)) # -1 means disable\n max_new_tokens = int(params.get(\"max_new_tokens\", 1024))\n stop_token_ids = params.get(\"stop_token_ids\", None) or []\n stop_token_ids.append(tokenizer.eos_token_id)\n\n decode_config = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True)\n streamer = TextIteratorStreamer(tokenizer, **decode_config)\n encoding = tokenizer(prompt, return_tensors=\"pt\").to(device)\n input_ids = encoding.input_ids\n encoding[\"decoder_input_ids\"] = encoding[\"input_ids\"].clone()\n input_echo_len = len(input_ids)\n\n generation_config = GenerationConfig(\n max_new_tokens=max_new_tokens,\n do_sample=temperature >= 1e-5,\n temperature=temperature,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=10,\n top_p=top_p,\n top_k=top_k,\n eos_token_id=stop_token_ids,\n )\n\n class CodeBlockStopper(StoppingCriteria):\n def __call__(\n self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs\n ) -> bool:\n # Code-completion is open-end generation.\n # We check \\n\\n to stop at end of a code block.\n if list(input_ids[0][-2:]) == [628, 198]:\n return True\n return False\n\n gen_kwargs = dict(\n **encoding,\n streamer=streamer,\n generation_config=generation_config,\n stopping_criteria=StoppingCriteriaList([CodeBlockStopper()]),\n )\n thread = Thread(target=model.generate, kwargs=gen_kwargs)\n thread.start()\n i = 0\n output = \"\"\n for new_text in streamer:\n i += 1\n output += new_text\n if i % stream_interval == 0 or i == max_new_tokens - 1:\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": None,\n }\n if i >= max_new_tokens:\n break\n\n if i >= max_new_tokens:\n finish_reason = \"length\"\n else:\n finish_reason = \"stop\"\n\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": finish_reason,\n }\n thread.join()\n\n # clean\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()"
},
{
"identifier": "generate_stream_falcon",
"path": "judgelm/model/model_falcon.py",
"snippet": "@torch.inference_mode()\ndef generate_stream_falcon(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n len_prompt = len(prompt)\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n top_k = int(params.get(\"top_k\", 50)) # -1 means disable\n max_new_tokens = int(params.get(\"max_new_tokens\", 256))\n stop_str = params.get(\"stop\", None)\n echo = bool(params.get(\"echo\", True))\n stop_token_ids = params.get(\"stop_token_ids\", None) or []\n stop_token_ids.append(tokenizer.eos_token_id)\n\n inputs = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n input_ids = inputs[\"input_ids\"]\n attention_mask = inputs[\"attention_mask\"]\n\n max_src_len = context_len - max_new_tokens - 8\n\n input_ids = input_ids[-max_src_len:] # truncate from the left\n attention_mask = attention_mask[-max_src_len:] # truncate from the left\n input_echo_len = len(input_ids)\n\n decode_config = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True)\n streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, **decode_config)\n\n generation_config = GenerationConfig(\n max_new_tokens=max_new_tokens,\n do_sample=temperature >= 1e-5,\n temperature=temperature,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=10,\n top_p=top_p,\n top_k=top_k,\n eos_token_id=stop_token_ids,\n )\n\n generation_kwargs = dict(\n inputs=input_ids,\n attention_mask=attention_mask,\n streamer=streamer,\n generation_config=generation_config,\n )\n\n thread = Thread(target=model.generate, kwargs=generation_kwargs)\n thread.start()\n\n if echo:\n # means keep the prompt\n output = prompt\n else:\n output = \"\"\n\n for i, new_text in enumerate(streamer):\n output += new_text\n if i % stream_interval == 0:\n if echo:\n rfind_start = len_prompt\n else:\n rfind_start = 0\n\n partially_stopped = False\n if stop_str:\n if isinstance(stop_str, str):\n pos = output.rfind(stop_str, rfind_start)\n if pos != -1:\n output = output[:pos]\n else:\n partially_stopped = is_partial_stop(output, stop_str)\n elif isinstance(stop_str, Iterable):\n for each_stop in stop_str:\n pos = output.rfind(each_stop, rfind_start)\n if pos != -1:\n output = output[:pos]\n break\n else:\n partially_stopped = is_partial_stop(output, each_stop)\n if partially_stopped:\n break\n else:\n raise ValueError(\"Invalid stop field type.\")\n\n # prevent yielding partial stop sequence\n if not partially_stopped:\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": None,\n }\n output = output.strip()\n\n # finish stream event, which contains finish reason\n if i == max_new_tokens - 1:\n finish_reason = \"length\"\n elif partially_stopped:\n finish_reason = None\n else:\n finish_reason = \"stop\"\n\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": finish_reason,\n }\n\n # clean\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()"
},
{
"identifier": "replace_llama_attn_with_non_inplace_operations",
"path": "judgelm/model/monkey_patch_non_inplace.py",
"snippet": "def replace_llama_attn_with_non_inplace_operations():\n \"\"\"Avoid bugs in mps backend by not using in-place operations.\"\"\"\n transformers.models.llama.modeling_llama.LlamaAttention.forward = forward"
},
{
"identifier": "get_gpu_memory",
"path": "judgelm/utils.py",
"snippet": "def get_gpu_memory(max_gpus=None):\n \"\"\"Get available memory for each GPU.\"\"\"\n import torch\n\n gpu_memory = []\n num_gpus = (\n torch.cuda.device_count()\n if max_gpus is None\n else min(max_gpus, torch.cuda.device_count())\n )\n\n for gpu_id in range(num_gpus):\n with torch.cuda.device(gpu_id):\n device = torch.cuda.current_device()\n gpu_properties = torch.cuda.get_device_properties(device)\n total_memory = gpu_properties.total_memory / (1024**3)\n allocated_memory = torch.cuda.memory_allocated() / (1024**3)\n available_memory = total_memory - allocated_memory\n gpu_memory.append(available_memory)\n return gpu_memory"
}
] | import math
import os
import sys
import warnings
import accelerate
import psutil
import torch
import intel_extension_for_pytorch as ipex
from typing import Dict, List, Optional
from functools import cache
from functools import lru_cache as cache
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
LlamaTokenizer,
LlamaForCausalLM,
T5Tokenizer,
)
from judgelm.modules.gptq import GptqConfig, load_gptq_quantized
from judgelm.conversation import Conversation, get_conv_template
from judgelm.model.compression import load_compress_model
from judgelm.model.model_chatglm import generate_stream_chatglm
from judgelm.model.model_codet5p import generate_stream_codet5p
from judgelm.model.model_falcon import generate_stream_falcon
from judgelm.model.monkey_patch_non_inplace import (
replace_llama_attn_with_non_inplace_operations,
)
from judgelm.utils import get_gpu_memory
from transformers import BitsAndBytesConfig
from judgelm.serve.inference import generate_stream
from peft import PeftConfig, PeftModel
from peft import PeftConfig, PeftModel
from fastchat.model.llama_condense_monkey_patch import (
replace_llama_with_condense,
)
from fastchat.model.rwkv_model import RwkvModel | 7,687 | model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
)
except NameError:
model = AutoModel.from_pretrained(
model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
)
return model, tokenizer
def load_compress_model(self, model_path, device, torch_dtype, revision="main"):
return load_compress_model(
model_path,
device,
torch_dtype,
use_fast=self.use_fast_tokenizer,
revision=revision,
)
def get_default_conv_template(self, model_path: str) -> Conversation:
return get_conv_template("one_shot")
# A global registry for all model adapters
# TODO (lmzheng): make it a priority queue.
model_adapters: List[BaseModelAdapter] = []
def register_model_adapter(cls):
"""Register a model adapter."""
model_adapters.append(cls())
@cache
def get_model_adapter(model_path: str) -> BaseModelAdapter:
"""Get a model adapter for a model_path."""
model_path_basename = os.path.basename(os.path.normpath(model_path))
# Try the basename of model_path at first
for adapter in model_adapters:
if adapter.match(model_path_basename) and type(adapter) != BaseModelAdapter:
return adapter
# Then try the full path
for adapter in model_adapters:
if adapter.match(model_path):
return adapter
raise ValueError(f"No valid model adapter for {model_path}")
def raise_warning_for_incompatible_cpu_offloading_configuration(
device: str, load_8bit: bool, cpu_offloading: bool
):
if cpu_offloading:
if not load_8bit:
warnings.warn(
"The cpu-offloading feature can only be used while also using 8-bit-quantization.\n"
"Use '--load-8bit' to enable 8-bit-quantization\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if not "linux" in sys.platform:
warnings.warn(
"CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if device != "cuda":
warnings.warn(
"CPU-offloading is only enabled when using CUDA-devices\n"
"Continuing without cpu-offloading enabled\n"
)
return False
return cpu_offloading
def load_model(
model_path: str,
device: str,
num_gpus: int,
max_gpu_memory: Optional[str] = None,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: Optional[GptqConfig] = None,
revision: str = "main",
debug: bool = False,
):
"""Load a model from Hugging Face."""
# get model adapter
adapter = get_model_adapter(model_path)
# Handle device mapping
cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration(
device, load_8bit, cpu_offloading
)
if device == "cpu":
kwargs = {"torch_dtype": torch.float32}
elif device == "cuda":
kwargs = {"torch_dtype": torch.float16}
if num_gpus != 1:
kwargs["device_map"] = "auto"
if max_gpu_memory is None:
kwargs[
"device_map"
] = "sequential" # This is important for not the same VRAM sizes
available_gpu_memory = get_gpu_memory(num_gpus)
kwargs["max_memory"] = {
i: str(int(available_gpu_memory[i] * 0.99)) + "GiB" # change it to 0.93 to avoid OOM
for i in range(num_gpus)
}
                # Cap max_memory for GPU 0 at 0.72x its available memory
kwargs["max_memory"][0] = str(int(available_gpu_memory[0] * 0.72)) + "GiB"
# print content of kwargs["max_memory"]
print("kwargs['max_memory'] = ", kwargs["max_memory"])
else:
kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
elif device == "mps":
kwargs = {"torch_dtype": torch.float16}
# Avoid bugs in mps backend by not using in-place operations.
| """Model adapter registration."""
if sys.version_info >= (3, 9):
else:
# Check an environment variable to check if we should be sharing Peft model
# weights. When false we treat all Peft models as separate.
peft_share_base_weights = (
os.environ.get("PEFT_SHARE_BASE_WEIGHTS", "false").lower() == "true"
)
class BaseModelAdapter:
"""The base and the default model adapter."""
use_fast_tokenizer = True
def match(self, model_path: str):
return True
def load_model(self, model_path: str, from_pretrained_kwargs: dict):
revision = from_pretrained_kwargs.get("revision", "main")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_path,
use_fast=self.use_fast_tokenizer,
revision=revision,
)
except TypeError:
tokenizer = AutoTokenizer.from_pretrained(
model_path,
use_fast=False,
revision=revision,
)
try:
model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
)
except NameError:
model = AutoModel.from_pretrained(
model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
)
return model, tokenizer
def load_compress_model(self, model_path, device, torch_dtype, revision="main"):
return load_compress_model(
model_path,
device,
torch_dtype,
use_fast=self.use_fast_tokenizer,
revision=revision,
)
def get_default_conv_template(self, model_path: str) -> Conversation:
return get_conv_template("one_shot")
# A global registry for all model adapters
# TODO (lmzheng): make it a priority queue.
model_adapters: List[BaseModelAdapter] = []
def register_model_adapter(cls):
"""Register a model adapter."""
model_adapters.append(cls())
@cache
def get_model_adapter(model_path: str) -> BaseModelAdapter:
"""Get a model adapter for a model_path."""
model_path_basename = os.path.basename(os.path.normpath(model_path))
# Try the basename of model_path at first
for adapter in model_adapters:
if adapter.match(model_path_basename) and type(adapter) != BaseModelAdapter:
return adapter
# Then try the full path
for adapter in model_adapters:
if adapter.match(model_path):
return adapter
raise ValueError(f"No valid model adapter for {model_path}")
def raise_warning_for_incompatible_cpu_offloading_configuration(
device: str, load_8bit: bool, cpu_offloading: bool
):
if cpu_offloading:
if not load_8bit:
warnings.warn(
"The cpu-offloading feature can only be used while also using 8-bit-quantization.\n"
"Use '--load-8bit' to enable 8-bit-quantization\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if not "linux" in sys.platform:
warnings.warn(
"CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n"
"Continuing without cpu-offloading enabled\n"
)
return False
if device != "cuda":
warnings.warn(
"CPU-offloading is only enabled when using CUDA-devices\n"
"Continuing without cpu-offloading enabled\n"
)
return False
return cpu_offloading
def load_model(
model_path: str,
device: str,
num_gpus: int,
max_gpu_memory: Optional[str] = None,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: Optional[GptqConfig] = None,
revision: str = "main",
debug: bool = False,
):
"""Load a model from Hugging Face."""
# get model adapter
adapter = get_model_adapter(model_path)
# Handle device mapping
cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration(
device, load_8bit, cpu_offloading
)
if device == "cpu":
kwargs = {"torch_dtype": torch.float32}
elif device == "cuda":
kwargs = {"torch_dtype": torch.float16}
if num_gpus != 1:
kwargs["device_map"] = "auto"
if max_gpu_memory is None:
kwargs[
"device_map"
] = "sequential" # This is important for not the same VRAM sizes
available_gpu_memory = get_gpu_memory(num_gpus)
kwargs["max_memory"] = {
i: str(int(available_gpu_memory[i] * 0.99)) + "GiB" # change it to 0.93 to avoid OOM
for i in range(num_gpus)
}
                # Cap max_memory for GPU 0 at 0.72x its available memory
kwargs["max_memory"][0] = str(int(available_gpu_memory[0] * 0.72)) + "GiB"
# print content of kwargs["max_memory"]
print("kwargs['max_memory'] = ", kwargs["max_memory"])
else:
kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
elif device == "mps":
kwargs = {"torch_dtype": torch.float16}
# Avoid bugs in mps backend by not using in-place operations. | replace_llama_attn_with_non_inplace_operations() | 8 | 2023-10-26 19:41:07+00:00 | 12k |