repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes)
---|---|---|---|---|---|---|---|---|---|---
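Each sample row below pairs a source file's retrieved cross-file `context` snippets, its `import_statement`, and a `cropped_code` prefix with the `next_line` a model is expected to produce; `gold_snippet_index` marks which context entry that line depends on, and `level` is a size bucket (e.g. `4k`). The following is a minimal sketch of how rows with this schema could be assembled into completion prompts and scored. It is an illustration only: the `rows.jsonl` path, the prompt layout, and the exact-match scoring are assumptions, not part of the dataset.

```python
import json

def build_prompt(row: dict) -> str:
    """Concatenate the retrieved cross-file context, the imports, and the cropped file prefix."""
    # Each entry in `context` carries an identifier, its source path, and a code snippet.
    context_block = "\n\n".join(
        f"# {c['path']}\n{c['snippet']}" for c in row["context"]
    )
    return f"{context_block}\n\n{row['import_statement']}\n{row['cropped_code']}\n"

def exact_match(prediction: str, row: dict) -> bool:
    """Score a model's completion against the gold next line, ignoring surrounding whitespace."""
    return prediction.strip() == row["next_line"].strip()

# Hypothetical local dump of rows like the ones below, one JSON object per line.
with open("rows.jsonl") as f:
    rows = [json.loads(line) for line in f]

for row in rows:
    prompt = build_prompt(row)
    # The context snippet the gold completion actually depends on.
    gold_context = row["context"][row["gold_snippet_index"]]
    print(row["repo_name"], row["file_path"], row["level"], gold_context["identifier"], len(prompt))
    # A model's generated line would then be checked with exact_match(generated_line, row).
```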
Q-Future/Q-Align | q_align/evaluate/iaa_eval.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "q_align/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "q_align/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<|image|>\""
},
{
"identifier": "conv_templates",
"path": "q_align/conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n TWO_NO_SYS = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):"
},
{
"identifier": "load_pretrained_model",
"path": "q_align/model/builder.py",
"snippet": "def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map=\"auto\", device=\"cuda\"):\n kwargs = {\"device_map\": device_map}\n\n if device != \"cuda\":\n kwargs['device_map'] = {\"\": device}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n if 'q-align' in model_name.lower():\n # Load LLaVA model\n if 'lora' in model_name.lower() and model_base is None:\n warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')\n if 'lora' in model_name.lower() and model_base is not None:\n lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n print('Loading mPLUG-Owl2 from base model...')\n model = MPLUGOwl2LlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features\n if model.lm_head.weight.shape[0] != token_num:\n model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n\n print('Loading additional mPLUG-Owl2 weights...')\n if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):\n non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')\n print(non_lora_trainables.keys())\n else:\n # this is probably from HF Hub\n from huggingface_hub import hf_hub_download\n def load_from_hf(repo_id, filename, subfolder=None):\n cache_file = hf_hub_download(\n repo_id=repo_id,\n filename=filename,\n subfolder=subfolder)\n return torch.load(cache_file, map_location='cpu')\n non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')\n non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}\n if any(k.startswith('model.model.') for k in non_lora_trainables):\n non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}\n model.load_state_dict(non_lora_trainables, strict=False)\n\n from peft import PeftModel\n print('Loading LoRA weights...')\n model = PeftModel.from_pretrained(model, model_path)\n print('Merging LoRA weights...')\n model = model.merge_and_unload()\n print('Model is loaded...')\n elif model_base is not None:\n # this may be mm projector only\n print('Loading mPLUG-Owl2 from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = MPLUGOwl2LlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = MPLUGOwl2LlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n # Load language model\n if model_base is not None:\n # PEFT model\n from peft 
import PeftModel\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)\n print(f\"Loading LoRA weights from {model_path}\")\n model = PeftModel.from_pretrained(model, model_path)\n print(f\"Merging weights\")\n model = model.merge_and_unload()\n print('Convert to FP16...')\n model.to(torch.float16)\n else:\n use_fast = False\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n \n #vision_tower = model.get_model().vision_model\n #print(vision_tower.device)\n #vision_tower.to(device=device, dtype=torch.float16)\n image_processor = CLIPImageProcessor.from_pretrained(model_path)\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len"
},
{
"identifier": "process_images",
"path": "q_align/mm_utils.py",
"snippet": "def process_images(images, image_processor, model_cfg=None):\n if model_cfg is not None:\n image_aspect_ratio = getattr(model_cfg, \"image_aspect_ratio\", None)\n else:\n image_aspect_ratio = 'resize'\n new_images = []\n if image_aspect_ratio == 'pad':\n for image in images:\n image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))\n image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n elif image_aspect_ratio == 'resize':\n for image in images:\n max_edge = max(image.size)\n image = image.resize((max_edge, max_edge))\n image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n else:\n return image_processor(images, return_tensors='pt')['pixel_values']\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images"
},
{
"identifier": "tokenizer_image_token",
"path": "q_align/mm_utils.py",
"snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "get_model_name_from_path",
"path": "q_align/mm_utils.py",
"snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]"
},
{
"identifier": "KeywordsStoppingCriteria",
"path": "q_align/mm_utils.py",
"snippet": "class KeywordsStoppingCriteria(StoppingCriteria):\n def __init__(self, keywords, tokenizer, input_ids):\n self.keywords = keywords\n self.keyword_ids = []\n self.max_keyword_len = 0\n for keyword in keywords:\n cur_keyword_ids = tokenizer(keyword).input_ids\n if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:\n cur_keyword_ids = cur_keyword_ids[1:]\n if len(cur_keyword_ids) > self.max_keyword_len:\n self.max_keyword_len = len(cur_keyword_ids)\n self.keyword_ids.append(torch.tensor(cur_keyword_ids))\n self.tokenizer = tokenizer\n self.start_len = input_ids.shape[1]\n\n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n assert output_ids.shape[0] == 1, \"Only support batch size 1 (yet)\" # TODO\n offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)\n self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]\n for keyword_id in self.keyword_ids:\n if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():\n return True\n outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]\n for keyword in self.keywords:\n if keyword in outputs:\n return True\n return False"
}
] | import argparse
import torch
import requests
import json
import os
import numpy as np
import torch
import json
from q_align.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from q_align.conversation import conv_templates, SeparatorStyle
from q_align.model.builder import load_pretrained_model
from q_align.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from PIL import Image
from PIL import ImageFile
from PIL import Image
from io import BytesIO
from transformers import TextStreamer
from scipy.stats import spearmanr, pearsonr
from tqdm import tqdm
from collections import defaultdict | 2,875 |
ImageFile.LOAD_TRUNCATED_IMAGES = True
def wa5(logits):
logprobs = np.array([logits["excellent"], logits["good"], logits["fair"], logits["poor"], logits["bad"]])
probs = np.exp(logprobs) / np.sum(np.exp(logprobs))
return np.inner(probs, np.array([1,0.75,0.5,0.25,0.]))
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
def main(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
|
ImageFile.LOAD_TRUNCATED_IMAGES = True
def wa5(logits):
logprobs = np.array([logits["excellent"], logits["good"], logits["fair"], logits["poor"], logits["bad"]])
probs = np.exp(logprobs) / np.sum(np.exp(logprobs))
return np.inner(probs, np.array([1,0.75,0.5,0.25,0.]))
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
def main(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path) | tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device) | 3 | 2023-12-14 03:36:30+00:00 | 4k |
open-compass/T-Eval | teval/evaluators/instruct_evaluator.py | [
{
"identifier": "parse_string",
"path": "teval/utils/template.py",
"snippet": "def parse_string(template: str, input_string: str, allow_newline: bool=False) -> dict:\n \"\"\"Return a dictionary whose keys are from input template and value is\n responding content from input_string.\n\n Args:\n template (str): Format template with keyword-only argument. For\n example '{who} like {what}'\n input_string (str): Input string will be parsed.\n allow_newline (boolen): Whether allow '\\n' in {} during RE match, default to False.\n\n Returns:\n dict: Parsed data from input string according to format string. If\n input string doesn't match template, It will return None.\n\n Examples:\n >>> template = '{who} like {what}'\n >>> input_string = 'monkey like banana'\n >>> data = parse_string(template, input_string)\n >>> data\n >>> {'who': 'monkey', 'what': 'banana'}\n >>> input_string = 'monkey likes banana'\n >>> data = parse_string(template, input_string)\n >>> data\n >>> None\n >>> template = '{what} like {what}'\n >>> input_string = 'monkey like banana'\n >>> data = parse_string(template, input_string)\n >>> data\n >>> {'what': ['monkey', 'banana']}\n \"\"\"\n\n formatter = Formatter()\n context = []\n keys = []\n for v in formatter.parse(template):\n # v is (literal_text, field_name, format_spec, conversion)\n if v[1] is not None:\n keys.append(v[1])\n context.append(v[0])\n pattern = template\n for k in keys:\n pattern = pattern.replace('{' + f'{k}' + '}', '(.*)')\n # pattern = re.compile(rf'{pattern}')\n values = re.findall(pattern, input_string, re.S if allow_newline else 0)\n if len(values) < 1:\n return None\n data = dict()\n for k, v in zip(keys, values[0]):\n if k in data:\n tmp = data[k]\n if isinstance(tmp, list):\n data[k].append(v)\n else:\n data[k] = [tmp, v]\n else:\n data[k] = v\n return data"
},
{
"identifier": "format_load",
"path": "teval/utils/format_load.py",
"snippet": "def format_load(raw_data: str, start_character: str = '', end_character: str = ''):\n \"\"\"Format the raw data into the format that can be evaluated.\n\n Args:\n raw_data (str): The raw data.\n start_character (str, optional): The start character. Defaults to '', if using it, the string will be sliced from the first start_character.\n end_character (str, optional): The end character. Defaults to '', if using it, the string will be sliced to the last end_character.\n\n Returns:\n str: The formatted data.\n \"\"\"\n if type(raw_data) != str:\n # the data has been evaluated\n return raw_data\n if \"```json\" in raw_data:\n raw_data = raw_data[raw_data.find(\"```json\") + len(\"```json\"):]\n raw_data = raw_data.strip(\"`\")\n if start_character != '':\n raw_data = raw_data[raw_data.find(start_character):]\n if end_character != '':\n raw_data = raw_data[:raw_data.rfind(end_character) + len(end_character)]\n successful_parse = False\n try:\n data = ast.literal_eval(raw_data)\n successful_parse = True\n except Exception as e:\n pass\n try:\n if not successful_parse:\n data = json.loads(raw_data)\n successful_parse = True\n except Exception as e:\n pass\n try:\n if not successful_parse:\n data = json.loads(raw_data.replace(\"\\'\", \"\\\"\"))\n successful_parse = True\n except Exception as e:\n pass\n if not successful_parse:\n raise Exception(\"Cannot parse raw data\")\n return data"
},
{
"identifier": "ResponseDataSample",
"path": "teval/schema.py",
"snippet": "class ResponseDataSample:\n \"\"\"\n Args:\n template(str): Format string with keyword-only arguments. For\n example '{who} like {what}'\n pred(Any): Parsed data from LLM generating response.\n gt(Any): Ground truth data\n meta_data(dict, optional): Meta information will be used to evaluate\n LLM's response\n \"\"\"\n template: str\n pred: Any\n gt: Any\n meta_data: dict = None"
}
] | from collections import defaultdict
from mmengine import load
from teval.utils.template import parse_string
from teval.utils.format_load import format_load
from teval.schema import ResponseDataSample
import json
import ast
import numpy as np | 1,927 |
class InstructEvaluator:
"""Instruct Following Evaluation
Args:
dataset_path(str): File path of evaluation dataset.
"""
def __init__(
self,
dataset_path: str,
**kwargs,
) -> None:
self.dataset_path = dataset_path
def _load_dataset(self):
self.dataset = []
dataset = load(self.dataset_path)
for key in dataset.keys():
datum = dataset[key]
data_sample = self._process_response(datum)
self.dataset.append(
dict(
origin_prompt=datum["origin_prompt"],
response_data_sample=data_sample))
self.num_samples = len(self.dataset)
def _process_response(
self,
datum: dict,
) -> ResponseDataSample:
"""Process the response to needed format.
Args:
datum(dict): inputs.
Returns:
dict: Processed response data sample.
"""
# Dict with keyword-only arguments.
template = datum['template']
# Generated response.
pred_data = datum['prediction']
# Response of ground truth.
gt_data = datum['ground_truth']
meta_data = datum['meta_data']
return ResponseDataSample(
template=template, pred=pred_data, gt=gt_data, meta_data=meta_data)
def _evaluate(self, data_sample: dict) -> dict:
metrics_result = dict()
response_format = data_sample.meta_data['response_format']
if response_format == 'json':
pred_data = self.json_format_parse(data_sample)
else:
pred_data = self.string_format_parse(data_sample)
if pred_data is None:
# directly set to 0 for all metrics
metrics_result[f'{response_format}_format_metric'] = 0
metrics_result[f'{response_format}_args_em_metric'] = 0
return metrics_result
# Exact matching
metrics_result[f'{response_format}_format_metric'] = 1
metrics_result[f'{response_format}_args_em_metric'] = self.compute_args_em_metric(
gt_action=data_sample.gt['action'], pred_action=pred_data['action'],
gt_args=data_sample.gt['args'], pred_args=pred_data['args']
)
return metrics_result
def compute_args_em_metric(self, gt_action, pred_action, gt_args, pred_args):
cnt = 0.
if gt_action == pred_action:
cnt += 1.
num_args = len(gt_args) + 1 # 1 means action name match
for gt_key in gt_args:
pred_val = pred_args.get(gt_key, "")
if pred_val == gt_args[gt_key]:
cnt += 1.
return cnt / num_args
def string_format_parse(self, data_sample):
pred_data = data_sample.pred
template = data_sample.template
thought_start = template['thought_start']
thought_end = template['thought_end']
action_start = template['action_start']
action_end = template['action_end']
args_start = template['args_start']
args_end = template['args_end']
parse_template = thought_start + "{thought}" + thought_end \
+ action_start + "{action}" + action_end \
+ args_start + "{args}" + args_end
res = parse_string(parse_template, pred_data, allow_newline=True)
try:
if res is not None:
args = ast.literal_eval(res['args'].strip())
res['args'] = args if isinstance(args, dict) else {}
res['action'] = res['action'].strip()
return res
except:
return dict(thought=res['thought'], action=res['action'].strip(), args=dict())
def json_format_parse(self, data_sample):
try:
|
class InstructEvaluator:
"""Instruct Following Evaluation
Args:
dataset_path(str): File path of evaluation dataset.
"""
def __init__(
self,
dataset_path: str,
**kwargs,
) -> None:
self.dataset_path = dataset_path
def _load_dataset(self):
self.dataset = []
dataset = load(self.dataset_path)
for key in dataset.keys():
datum = dataset[key]
data_sample = self._process_response(datum)
self.dataset.append(
dict(
origin_prompt=datum["origin_prompt"],
response_data_sample=data_sample))
self.num_samples = len(self.dataset)
def _process_response(
self,
datum: dict,
) -> ResponseDataSample:
"""Process the response to needed format.
Args:
datum(dict): inputs.
Returns:
dict: Processed response data sample.
"""
# Dict with keyword-only arguments.
template = datum['template']
# Generated response.
pred_data = datum['prediction']
# Response of ground truth.
gt_data = datum['ground_truth']
meta_data = datum['meta_data']
return ResponseDataSample(
template=template, pred=pred_data, gt=gt_data, meta_data=meta_data)
def _evaluate(self, data_sample: dict) -> dict:
metrics_result = dict()
response_format = data_sample.meta_data['response_format']
if response_format == 'json':
pred_data = self.json_format_parse(data_sample)
else:
pred_data = self.string_format_parse(data_sample)
if pred_data is None:
# directly set to 0 for all metrics
metrics_result[f'{response_format}_format_metric'] = 0
metrics_result[f'{response_format}_args_em_metric'] = 0
return metrics_result
# Exact matching
metrics_result[f'{response_format}_format_metric'] = 1
metrics_result[f'{response_format}_args_em_metric'] = self.compute_args_em_metric(
gt_action=data_sample.gt['action'], pred_action=pred_data['action'],
gt_args=data_sample.gt['args'], pred_args=pred_data['args']
)
return metrics_result
def compute_args_em_metric(self, gt_action, pred_action, gt_args, pred_args):
cnt = 0.
if gt_action == pred_action:
cnt += 1.
num_args = len(gt_args) + 1 # 1 means action name match
for gt_key in gt_args:
pred_val = pred_args.get(gt_key, "")
if pred_val == gt_args[gt_key]:
cnt += 1.
return cnt / num_args
def string_format_parse(self, data_sample):
pred_data = data_sample.pred
template = data_sample.template
thought_start = template['thought_start']
thought_end = template['thought_end']
action_start = template['action_start']
action_end = template['action_end']
args_start = template['args_start']
args_end = template['args_end']
parse_template = thought_start + "{thought}" + thought_end \
+ action_start + "{action}" + action_end \
+ args_start + "{args}" + args_end
res = parse_string(parse_template, pred_data, allow_newline=True)
try:
if res is not None:
args = ast.literal_eval(res['args'].strip())
res['args'] = args if isinstance(args, dict) else {}
res['action'] = res['action'].strip()
return res
except:
return dict(thought=res['thought'], action=res['action'].strip(), args=dict())
def json_format_parse(self, data_sample):
try: | pred_data = format_load(data_sample.pred) | 1 | 2023-12-10 05:18:46+00:00 | 4k |
rabilrbl/gemini-pro-bot | gemini_pro_bot/bot.py | [
{
"identifier": "AuthFilter",
"path": "gemini_pro_bot/filters.py",
"snippet": "_AUTHORIZED_USERS = [\n i.strip() for i in os.getenv(\"AUTHORIZED_USERS\", \"\").split(\",\") if i.strip()\n]\nclass AuthorizedUserFilter(UpdateFilter):\n def filter(self, update: Update):"
},
{
"identifier": "start",
"path": "gemini_pro_bot/handlers.py",
"snippet": "async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"Send a message when the command /start is issued.\"\"\"\n user = update.effective_user\n await update.message.reply_html(\n f\"Hi {user.mention_html()}!\\n\\nStart sending messages with me to generate a response.\\n\\nSend /new to start a new chat session.\",\n # reply_markup=ForceReply(selective=True),\n )"
},
{
"identifier": "help_command",
"path": "gemini_pro_bot/handlers.py",
"snippet": "async def help_command(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"Send a message when the command /help is issued.\"\"\"\n help_text = \"\"\"\nBasic commands:\n/start - Start the bot\n/help - Get help. Shows this message\n\nChat commands:\n/new - Start a new chat session (model will forget previously generated messages)\n\nSend a message to the bot to generate a response.\n\"\"\"\n await update.message.reply_text(help_text)"
},
{
"identifier": "newchat_command",
"path": "gemini_pro_bot/handlers.py",
"snippet": "async def newchat_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"Start a new chat session.\"\"\"\n init_msg = await update.message.reply_text(\n text=\"Starting new chat session...\",\n reply_to_message_id=update.message.message_id,\n )\n new_chat(context)\n await init_msg.edit_text(\"New chat session started.\")"
},
{
"identifier": "handle_message",
"path": "gemini_pro_bot/handlers.py",
"snippet": "async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"Handles incoming text messages from users.\n\n Checks if a chat session exists for the user, initializes a new session if not.\n Sends the user's message to the chat session to generate a response.\n Streams the response back to the user, handling any errors.\n \"\"\"\n if context.chat_data.get(\"chat\") is None:\n new_chat(context)\n text = update.message.text\n init_msg = await update.message.reply_text(\n text=\"Generating...\", reply_to_message_id=update.message.message_id\n )\n await update.message.chat.send_action(ChatAction.TYPING)\n # Generate a response using the text-generation pipeline\n chat = context.chat_data.get(\"chat\") # Get the chat session for this chat\n response = None\n try:\n response = await chat.send_message_async(\n text, stream=True\n ) # Generate a response\n except StopCandidateException as sce:\n print(\"Prompt: \", text, \" was stopped. User: \", update.message.from_user)\n print(sce)\n await init_msg.edit_text(\"The model unexpectedly stopped generating.\")\n chat.rewind() # Rewind the chat session to prevent the bot from getting stuck\n return\n except BlockedPromptException as bpe:\n print(\"Prompt: \", text, \" was blocked. User: \", update.message.from_user)\n print(bpe)\n await init_msg.edit_text(\"Blocked due to safety concerns.\")\n if response:\n # Resolve the response to prevent the chat session from getting stuck\n await response.resolve()\n return\n full_plain_message = \"\"\n # Stream the responses\n async for chunk in response:\n try:\n if chunk.text:\n full_plain_message += chunk.text\n message = format_message(full_plain_message)\n init_msg = await init_msg.edit_text(\n text=message,\n parse_mode=ParseMode.HTML,\n disable_web_page_preview=True,\n )\n except StopCandidateException as sce:\n await init_msg.edit_text(\"The model unexpectedly stopped generating.\")\n chat.rewind() # Rewind the chat session to prevent the bot from getting stuck\n continue\n except BadRequest:\n await response.resolve() # Resolve the response to prevent the chat session from getting stuck\n continue\n except NetworkError:\n raise NetworkError(\n \"Looks like you're network is down. Please try again later.\"\n )\n except IndexError:\n await init_msg.reply_text(\n \"Some index error occurred. This response is not supported.\"\n )\n await response.resolve()\n continue\n except Exception as e:\n print(e)\n if chunk.text:\n full_plain_message = chunk.text\n message = format_message(full_plain_message)\n init_msg = await update.message.reply_text(\n text=message,\n parse_mode=ParseMode.HTML,\n reply_to_message_id=init_msg.message_id,\n disable_web_page_preview=True,\n )\n # Sleep for a bit to prevent the bot from getting rate-limited\n await asyncio.sleep(0.1)"
},
{
"identifier": "handle_image",
"path": "gemini_pro_bot/handlers.py",
"snippet": "async def handle_image(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"Handle incoming images with captions and generate a response.\"\"\"\n init_msg = await update.message.reply_text(\n text=\"Generating...\", reply_to_message_id=update.message.message_id\n )\n images = update.message.photo\n unique_images: dict = {}\n for img in images:\n file_id = img.file_id[:-7]\n if file_id not in unique_images:\n unique_images[file_id] = img\n elif img.file_size > unique_images[file_id].file_size:\n unique_images[file_id] = img\n file_list = list(unique_images.values())\n file = await file_list[0].get_file()\n a_img = load_image.open(BytesIO(await file.download_as_bytearray()))\n prompt = None\n if update.message.caption:\n prompt = update.message.caption\n else:\n prompt = \"Analyse this image and generate response\"\n response = await img_model.generate_content_async([prompt, a_img], stream=True)\n full_plain_message = \"\"\n async for chunk in response:\n try:\n if chunk.text:\n full_plain_message += chunk.text\n message = format_message(full_plain_message)\n init_msg = await init_msg.edit_text(\n text=message,\n parse_mode=ParseMode.HTML,\n disable_web_page_preview=True,\n )\n except StopCandidateException:\n await init_msg.edit_text(\"The model unexpectedly stopped generating.\")\n except BadRequest:\n await response.resolve()\n continue\n except NetworkError:\n raise NetworkError(\n \"Looks like you're network is down. Please try again later.\"\n )\n except IndexError:\n await init_msg.reply_text(\n \"Some index error occurred. This response is not supported.\"\n )\n await response.resolve()\n continue\n except Exception as e:\n print(e)\n if chunk.text:\n full_plain_message = chunk.text\n message = format_message(full_plain_message)\n init_msg = await update.message.reply_text(\n text=message,\n parse_mode=ParseMode.HTML,\n reply_to_message_id=init_msg.message_id,\n disable_web_page_preview=True,\n )\n await asyncio.sleep(0.1)"
}
] | import os
from telegram import Update
from telegram.ext import (
CommandHandler,
MessageHandler,
Application,
)
from gemini_pro_bot.filters import AuthFilter, MessageFilter, PhotoFilter
from dotenv import load_dotenv
from gemini_pro_bot.handlers import (
start,
help_command,
newchat_command,
handle_message,
handle_image,
) | 1,865 |
load_dotenv()
def start_bot() -> None:
"""Start the bot."""
# Create the Application and pass it your bot's token.
application = Application.builder().token(os.getenv("BOT_TOKEN")).build()
# on different commands - answer in Telegram
application.add_handler(CommandHandler("start", start, filters=AuthFilter))
application.add_handler(CommandHandler("help", help_command, filters=AuthFilter))
application.add_handler(CommandHandler("new", newchat_command, filters=AuthFilter))
# Any text message is sent to LLM to generate a response
|
load_dotenv()
def start_bot() -> None:
"""Start the bot."""
# Create the Application and pass it your bot's token.
application = Application.builder().token(os.getenv("BOT_TOKEN")).build()
# on different commands - answer in Telegram
application.add_handler(CommandHandler("start", start, filters=AuthFilter))
application.add_handler(CommandHandler("help", help_command, filters=AuthFilter))
application.add_handler(CommandHandler("new", newchat_command, filters=AuthFilter))
# Any text message is sent to LLM to generate a response | application.add_handler(MessageHandler(MessageFilter, handle_message)) | 0 | 2023-12-14 16:57:14+00:00 | 4k |
nox-410/tvm.tl | tests/python/contrib/test_hexagon/test_relay_simplify_conv_pat.py | [
{
"identifier": "build_module",
"path": "tests/python/contrib/test_hexagon/infrastructure.py",
"snippet": "def build_module(relay_mod, target):\n \"\"\"builds a relay module for a specified target\"\"\"\n params = {}\n executor = Executor(\"aot\", {\"link-params\": True})\n lowered = tvm.relay.build(\n relay_mod,\n tvm.target.Target(target, host=target),\n executor=executor,\n params=params,\n )\n return lowered"
},
{
"identifier": "run_module",
"path": "tests/python/contrib/test_hexagon/infrastructure.py",
"snippet": "def run_module(mod, inputs):\n \"\"\"invokes run function of specified module with inputs provided\"\"\"\n mod.set_input(**inputs)\n mod.run()\n output = mod.get_output(0).numpy()\n return output"
}
] | import numpy as np
import tvm
from tvm.runtime import ndarray as nd
from tvm import relay, testing
from tvm.contrib.hexagon.transform import simplify_conv_pat
from tvm.topi.utils import get_const_tuple
from tvm.contrib.hexagon.session import Session
from tvm.contrib.hexagon.pytest_plugin import HEXAGON_AOT_LLVM_TARGET
from .infrastructure import build_module, run_module | 1,811 | relay_mul_factor = relay.const(0.00392151, dtype="float32")
else:
relay_mul_factor = np.random.rand(*get_const_tuple(act_shape))
relay_mul_factor = relay.Constant(
nd.array(np.full(relay_mul_factor.shape, relay_mul_factor, dtype="float32"))
)
relay_sub_term = relay.const(0.5, dtype="float32")
relay_weights = relay.Constant(nd.array(np.full(weights.shape, weights, dtype="float32")))
relay_bias = relay.Constant(nd.array(np.full(bias.shape, bias, dtype="float32")))
return (relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias, data_in_float32)
def get_test_module_graph(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay graph with the specified relay expressions"""
v1 = relay.multiply(relay_act, relay_mul_factor)
v2 = relay.subtract(v1, relay_sub_term)
v3 = relay.transpose(v2, axes=[0, 3, 1, 2])
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v4 = relay.nn.conv2d(
v3,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
graph = relay.nn.bias_add(v4, relay_bias)
return graph
def get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay module and returns it."""
graph = get_test_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
func = relay.Function(relay.analysis.free_vars(graph), graph)
mod = tvm.IRModule.from_expr(func)
return mod
def get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Creates the relay graph for expected output"""
v1 = relay.transpose(relay_act, axes=[0, 3, 1, 2])
v2 = relay.multiply(relay_mul_factor, relay_weights)
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v3 = relay.nn.conv2d(
v1, v2, padding=[1, 1, 1, 1], channels=weights_type_info.shape[0], kernel_size=[3, 3]
)
type_info = tvm.relay.transform.InferTypeLocal(v1)
relay_zero_act = relay.Constant(
nd.array(np.zeros(get_const_tuple(type_info.shape), dtype="float32"))
)
v4 = relay.subtract(relay_zero_act, relay_sub_term)
v5 = relay.nn.bias_add(v3, relay_bias)
v6 = relay.nn.conv2d(
v4,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
return relay.add(v5, v6)
def get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Returns manually created expected output module."""
graph = get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
out_func = relay.Function(relay.analysis.free_vars(graph), graph)
return tvm.IRModule.from_expr(out_func)
def get_test_modules():
"""generates test, expected modules and their inputs"""
(
relay_act,
relay_mul_factor,
relay_sub_term,
relay_weights,
relay_bias,
data_in_float32,
) = get_test_module_relay_exprs()
mod = get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias)
exp_relay_mod = get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
return mod, exp_relay_mod, {"q1": data_in_float32}
@tvm.testing.requires_hexagon
def test_simplify_conv_pat(hexagon_session: Session):
"""A positive test case"""
(mod, exp_relay_mod, inputs) = get_test_modules()
with tvm.transform.PassContext(opt_level=3):
mod = tvm.relay.transform.InferType()(mod)
hexagon_lowered = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
with tvm.transform.PassContext(opt_level=3):
mod = simplify_conv_pat(mod)
mod = tvm.relay.transform.InferType()(mod)
exp_relay_mod = tvm.relay.transform.InferType()(exp_relay_mod)
assert tvm.ir.structural_equal(mod["main"], exp_relay_mod["main"], map_free_vars=True)
mod = tvm.relay.transform.FoldConstant()(mod)
hexagon_lowered_opt = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
# Run unoptimized llvm module
hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-wildcard-import, invalid-name
"""
Test hexagon relay transform - qnn.concat optimization
"""
def get_test_module_relay_exprs(isConstScalarMultiplier=True):
"""
Creates relay expressions that can be used both by
test module and expected output module
"""
act_shape = (1, 32, 32, 3)
data_in = np.random.rand(*get_const_tuple(act_shape))
data_in_float32 = np.full(data_in.shape, data_in, dtype="float32")
kernel_shape = (16, 3, 3, 3)
weights = np.random.rand(*get_const_tuple(kernel_shape))
bias = np.random.rand(get_const_tuple(kernel_shape)[0])
relay_act = relay.var("q1", shape=act_shape, dtype="float32")
if isConstScalarMultiplier:
relay_mul_factor = relay.const(0.00392151, dtype="float32")
else:
relay_mul_factor = np.random.rand(*get_const_tuple(act_shape))
relay_mul_factor = relay.Constant(
nd.array(np.full(relay_mul_factor.shape, relay_mul_factor, dtype="float32"))
)
relay_sub_term = relay.const(0.5, dtype="float32")
relay_weights = relay.Constant(nd.array(np.full(weights.shape, weights, dtype="float32")))
relay_bias = relay.Constant(nd.array(np.full(bias.shape, bias, dtype="float32")))
return (relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias, data_in_float32)
def get_test_module_graph(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay graph with the specified relay expressions"""
v1 = relay.multiply(relay_act, relay_mul_factor)
v2 = relay.subtract(v1, relay_sub_term)
v3 = relay.transpose(v2, axes=[0, 3, 1, 2])
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v4 = relay.nn.conv2d(
v3,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
graph = relay.nn.bias_add(v4, relay_bias)
return graph
def get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay module and returns it."""
graph = get_test_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
func = relay.Function(relay.analysis.free_vars(graph), graph)
mod = tvm.IRModule.from_expr(func)
return mod
def get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Creates the relay graph for expected output"""
v1 = relay.transpose(relay_act, axes=[0, 3, 1, 2])
v2 = relay.multiply(relay_mul_factor, relay_weights)
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v3 = relay.nn.conv2d(
v1, v2, padding=[1, 1, 1, 1], channels=weights_type_info.shape[0], kernel_size=[3, 3]
)
type_info = tvm.relay.transform.InferTypeLocal(v1)
relay_zero_act = relay.Constant(
nd.array(np.zeros(get_const_tuple(type_info.shape), dtype="float32"))
)
v4 = relay.subtract(relay_zero_act, relay_sub_term)
v5 = relay.nn.bias_add(v3, relay_bias)
v6 = relay.nn.conv2d(
v4,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
return relay.add(v5, v6)
def get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Returns manually created expected output module."""
graph = get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
out_func = relay.Function(relay.analysis.free_vars(graph), graph)
return tvm.IRModule.from_expr(out_func)
def get_test_modules():
"""generates test, expected modules and their inputs"""
(
relay_act,
relay_mul_factor,
relay_sub_term,
relay_weights,
relay_bias,
data_in_float32,
) = get_test_module_relay_exprs()
mod = get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias)
exp_relay_mod = get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
return mod, exp_relay_mod, {"q1": data_in_float32}
@tvm.testing.requires_hexagon
def test_simplify_conv_pat(hexagon_session: Session):
"""A positive test case"""
(mod, exp_relay_mod, inputs) = get_test_modules()
with tvm.transform.PassContext(opt_level=3):
mod = tvm.relay.transform.InferType()(mod)
hexagon_lowered = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
with tvm.transform.PassContext(opt_level=3):
mod = simplify_conv_pat(mod)
mod = tvm.relay.transform.InferType()(mod)
exp_relay_mod = tvm.relay.transform.InferType()(exp_relay_mod)
assert tvm.ir.structural_equal(mod["main"], exp_relay_mod["main"], map_free_vars=True)
mod = tvm.relay.transform.FoldConstant()(mod)
hexagon_lowered_opt = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
# Run unoptimized llvm module
hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered) | expected_output = run_module(hexagon_mod, inputs) | 1 | 2023-12-14 02:37:47+00:00 | 4k |
berlino/gated_linear_attention | kernels/intra_chunk_contribution/fn.py | [
{
"identifier": "FlashGRet",
"path": "kernels/intra_chunk_contribution/fn_only_gk.py",
"snippet": "class FlashGRet(torch.autograd.Function):\n @staticmethod\n def forward(ctx, q, k, gk):\n q = q.contiguous()\n k = k.contiguous()\n gk = gk.contiguous()\n \n # assert gk.dtype==torch.float32 \n # only support for Ampere now\n\n capability = torch.cuda.get_device_capability()\n if capability[0] < 8:\n raise RuntimeError(\"Flash attention currently only supported for compute capability >= 80\")\n\n # assert gk.dtype == gv.dtype == torch.float32 \n # for now.\n BLOCK_M = BLOCK_N = q.shape[-2]\n\n # shape constraints\n Lq, Lk = q.shape[-1], k.shape[-1]\n assert Lq == Lk \n if Lk > 128:\n assert Lk % 128 == 0\n\n BLOCK_DMODEL_QK = min(Lk, 128)\n ctx.BLOCK_DMODEL_QK = BLOCK_DMODEL_QK\n\n A = torch.zeros(max(1, Lk//128) , q.shape[0], q.shape[1], q.shape[2], BLOCK_N, BLOCK_N, device=q.device, dtype=q.dtype) \n\n\n\n grid = (q.shape[2] , q.shape[0] * q.shape[1], max(1, Lk//128)) \n\n # assert q.dtype == k.dtype == v.dtype \n _fwd_kernel_compute_A[grid](\n q, k, gk, A,\n q.stride(0), q.stride(1), q.stride(2), q.stride(3),\n ### be careful here!\n A.stride(1), A.stride(2), A.stride(3), A.stride(4),\n q.shape[0], q.shape[1], q.shape[2], q.shape[3], \n BLOCK_N=BLOCK_N, BLOCK_DMODEL_QK=BLOCK_DMODEL_QK, BLOCK_M=BLOCK_M, num_warps=8 if ctx.BLOCK_DMODEL_QK == 128 else 4, num_stages=8\n )\n\n ctx.save_for_backward(q, k, gk)\n ctx.grid = grid\n ctx.BLOCK_N = BLOCK_N\n ctx.BLOCK_N = BLOCK_N\n ctx.head = q.shape[1]\n return A.sum(0).to(q.dtype)\n\n\n\n @staticmethod\n def backward(ctx, dA):\n dA = dA.contiguous()\n q, k, gk = ctx.saved_tensors\n\n # appearantly, there is no sync issue when splitting K dim.\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dgk = torch.zeros_like(gk)\n \n\n BLOCK_N = ctx.BLOCK_N\n # for now.\n BLOCK_M = BLOCK_N\n # shape constraints\n Lq, Lk = q.shape[-1], k.shape[-1]\n\n _bwd_kernel_dqk[ctx.grid](\n q, k, gk, dA,\n dq, \n dk, dgk,\n q.stride(0), q.stride(1), q.stride(2), q.stride(3),\n dA.stride(0), dA.stride(1), dA.stride(2), dA.stride(3),\n q.shape[0], q.shape[1], q.shape[2], q.shape[3],\n BLOCK_N=BLOCK_N, BLOCK_DMODEL_QK=ctx.BLOCK_DMODEL_QK, BLOCK_M=BLOCK_M, num_warps=8 if ctx.BLOCK_DMODEL_QK == 128 else 4, num_stages=5\n )\n \n return dq, dk, dgk, None"
},
{
"identifier": "FlashGRet_O",
"path": "kernels/intra_chunk_contribution/fn_only_gv.py",
"snippet": "class FlashGRet_O(torch.autograd.Function):\n @staticmethod\n def forward(ctx, A, v, gv, chunk_size=16):\n assert gv.dtype == torch.float32\n # assert A.dtype == torch.float32\n\n # only support for Ampere now\n capability = torch.cuda.get_device_capability()\n if capability[0] < 8:\n raise RuntimeError(\"Flash attention currently only supported for compute capability >= 80\")\n \n # assert gk.dtype == gv.dtype == torch.float32 \n BLOCK_M = BLOCK_N = v.shape[-2]\n\n # shape constraints\n Lv = v.shape[-1]\n BLOCK_V = min(128, Lv)\n ctx.BLOCK_V = BLOCK_V \n\n assert v.shape[-1] % BLOCK_V == 0\n \n grid = (v.shape[2] , v.shape[0] * v.shape[1], max(1, v.shape[-1] // BLOCK_V))\n \n o = torch.empty_like(v) \n\n \n\n _fwd_compute_O[grid](A, v, gv, o,\n A.stride(0), A.stride(1), A.stride(2), A.stride(3),\n v.stride(0), v.stride(1), v.stride(2), v.stride(3),\n BLOCK_N=BLOCK_N, BLOCK_M=BLOCK_M,\n BLOCK_DMODEL_V=BLOCK_V, num_warps= 8 if BLOCK_V==128 else 4, num_stages=5\n )\n\n ctx.save_for_backward(A, v,gv, o)\n ctx.grid = grid \n ctx.chunk_size = chunk_size\n return o\n\n\n\n\n\n @staticmethod\n def backward(ctx, do):\n do = do.contiguous()\n A, v, gv, o = ctx.saved_tensors\n BLOCK_V = ctx.BLOCK_V\n assert v.shape[-1] % BLOCK_V == 0\n\n\n # dA = torch.empty_like(A)\n dv = torch.zeros_like(v)\n dgv = torch.zeros_like(gv)\n \n # for now.\n BLOCK_M = BLOCK_N = v.shape[-2]\n \n # shape constraints\n # Lv = v.shape[-1]\n # grid = (v.shape[2] , v.shape[0] * v.shape[1], v.shape[-1] // BLOCK_V)\n grid = ctx.grid \n\n dA = torch.empty(v.shape[-1] // BLOCK_V if BLOCK_V == 128 else 1, A.shape[0], A.shape[1], A.shape[2], A.shape[3], A.shape[3], device=A.device, dtype=A.dtype)\n\n _bwd_kernel_dav[grid](\n v, gv, A, o, \n do, dA,\n dv, dgv,\n v.shape[0], v.shape[1],\n A.stride(0), A.stride(1), A.stride(2), A.stride(3),\n v.stride(0), v.stride(1), v.stride(2), v.stride(3),\n BLOCK_N=BLOCK_N, BLOCK_M=BLOCK_M, \n BLOCK_DMODEL_V=ctx.BLOCK_V, num_warps=8, num_stages=4\n ) \n\n return dA.sum(0).to(A), dv.to(v), dgv.to(gv), None"
}
] | import torch
import time
import math
import torch
import torch.nn.functional as F
import torch
import triton
import triton.language as tl
import numpy as np
import math
from typing import Tuple, Union, Optional
from einops import rearrange
from .fn_only_gk import FlashGRet
from .fn_only_gv import FlashGRet_O | 1,924 |
def intra_chunk_onc(q, k, v, gk, gv):
assert q.is_contiguous()
assert k.is_contiguous()
assert v.is_contiguous()
if gk is not None:
assert gk.is_contiguous()
if gv is not None:
assert gv.is_contiguous()
# q = q.float()
# k = k.float()
# v = v.float()
origin_chunk_size = k.shape[-2]
assert k.shape[-2] % 16 == 0
if gk is not None:
|
def intra_chunk_onc(q, k, v, gk, gv):
assert q.is_contiguous()
assert k.is_contiguous()
assert v.is_contiguous()
if gk is not None:
assert gk.is_contiguous()
if gv is not None:
assert gv.is_contiguous()
# q = q.float()
# k = k.float()
# v = v.float()
origin_chunk_size = k.shape[-2]
assert k.shape[-2] % 16 == 0
if gk is not None: | A = FlashGRet.apply(q, k, gk) | 0 | 2023-12-11 18:13:44+00:00 | 4k |
kakaobrain/honeybee | eval_tasks.py | [
{
"identifier": "get_model",
"path": "pipeline/interface.py",
"snippet": "def get_model(pretrained_ckpt, use_bf16=True, load_in_8bit=False):\n \"\"\"Model Provider with tokenizer and processor.\n\n Args:\n pretrained_ckpt (string): The path to pre-trained checkpoint.\n use_bf16 (bool, optional): Whether to use bfloat16 to load the model. (Default: True)\n load_in_8bit(bool, optional): Flag to load model in 8it. (Default: False)\n\n Returns:\n model: Honeybee Model\n tokenizer: Honeybee (Llama) text tokenizer\n processor: Honeybee processor (including text and image)\n \"\"\"\n # Load model where base_ckpt is different when the target model is trained by PEFT\n model = load_model(pretrained_ckpt, use_bf16, load_in_8bit)\n\n image_size = model.config.vision_config.image_size\n num_query_tokens = model.config.num_query_tokens\n num_eos_tokens = getattr(model.config.visual_projector_config, \"num_eos_tokens\", 1)\n num_visual_tokens = num_query_tokens + num_eos_tokens\n\n # Build processor\n image_processor = HoneybeeImageProcessor(\n size=image_size,\n crop_size=image_size,\n image_mean=OPENAI_CLIP_MEAN,\n image_std=OPENAI_CLIP_STD,\n )\n # Load tokenizer (LlamaTokenizer)\n tokenizer_ckpt = model.config.lm_config.pretrained_tokenizer_name_or_path\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_ckpt, use_fast=False)\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.unk_token\n processor = HoneybeeProcessor(\n image_processor, tokenizer, num_visual_token=num_visual_tokens\n )\n\n return model, tokenizer, processor"
},
{
"identifier": "build_task",
"path": "tasks/build.py",
"snippet": "def build_task(model, tokenizer, processor, task_config):\n # build dataset\n dataset = build_dataset(processor=processor, **task_config.dataset)\n loader = build_dataloader(dataset, **task_config.dataloader)\n\n # build task\n task_name = task_config.name\n task_class = TASK_DICT[task_name]\n task = task_class(model, tokenizer, processor, loader, task_config.gen_kwargs)\n\n return task"
},
{
"identifier": "get_logger",
"path": "utils/logging.py",
"snippet": "def get_logger(name=\"default\", log_file=None, log_level=logging.INFO, file_mode='w'):\n \"\"\"Initialize and get a logger by name.\n\n If the logger has not been initialized, this method will initialize the\n logger by adding one or two handlers, otherwise the initialized logger will\n be directly returned. During initialization, a StreamHandler will always be\n added. If `log_file` is specified and the process rank is 0, a FileHandler\n will also be added.\n\n Args:\n name (str): Logger name.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the logger.\n log_level (int): The logger level. Note that only the process of\n rank 0 is affected, and other processes will set the level to\n \"Error\" thus be silent most of the time.\n file_mode (str): The file mode used in opening log file.\n Defaults to 'w'.\n\n Returns:\n logging.Logger: The expected logger.\n \"\"\"\n logger = logging.getLogger(name)\n if name in logger_initialized:\n return logger\n # handle hierarchical names\n # e.g., logger \"a\" is initialized, then logger \"a.b\" will skip the\n # initialization since it is a child of \"a\".\n for logger_name in logger_initialized:\n if name.startswith(logger_name):\n return logger\n\n # handle duplicate logs to the console\n # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler <stderr> (NOTSET)\n # to the root logger. As logger.propagate is True by default, this root\n # level handler causes logging messages from rank>0 processes to\n # unexpectedly show up on the console, creating much unwanted clutter.\n # To fix this issue, we set the root logger's StreamHandler, if any, to log\n # at the ERROR level.\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if dist.is_available() and dist.is_initialized():\n rank = dist.get_rank()\n else:\n rank = 0\n\n # only rank 0 will add a FileHandler\n if rank == 0 and log_file is not None:\n # Here, the default behaviour of the official logger is 'a'. Thus, we\n # provide an interface to change the file mode to the default\n # behaviour.\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n # log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n log_format = \"%(levelname)s %(asctime)s | %(message)s\"\n date_format = \"%m/%d %H:%M:%S\"\n formatter = logging.Formatter(log_format, date_format)\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n if rank == 0:\n logger.setLevel(log_level)\n else:\n logger.setLevel(logging.ERROR)\n\n logger_initialized[name] = True\n\n return logger"
}
] | import argparse
import os
import torch
import utils
from sconf import Config
from torch.distributed import destroy_process_group, init_process_group
from pipeline.interface import get_model
from tasks import build_task
from utils.logging import get_logger | 1,791 |
parser = argparse.ArgumentParser()
parser.add_argument(
"--ckpt_path",
type=str,
help="Path to the trained checkpoint.",
)
parser.add_argument(
"--result_dir",
type=str,
default="eval_results/",
help="Path to the result files.",
)
parser.add_argument("--config", nargs="+", required=True, help="Task configs.")
parser.add_argument(
"--load_results",
action="store_true",
help="Load saved results without model inference. Only for the results without re-formatted.",
)
parser.add_argument(
"--dump_submission_file",
action="store_true",
help="Dump a submission file with a specific format to evaluate on a evaluation server.",
)
parser.add_argument(
"--batch_size", "-B",
type=int,
default=None,
help="Per-device batch size for evaluation. (default: use the value in the config)",
)
logger = get_logger()
def dist_setup():
# Expected to use torchrun
init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
def init(ckpt_path, load_results=False):
if load_results:
logger.info("Skip init model in load_results mode.")
return None, None, None
logger.info("Init (load model, tokenizer, processor) ...")
# create model
model, tokenizer, processor = get_model(ckpt_path)
model.cuda()
logger.info(" -- Init done.")
return model, tokenizer, processor
def eval_single(
model,
tokenizer,
processor,
config_path,
result_dir,
load_results=False,
dump_submission_file=False,
):
task_config = Config(config_path)
task_config = next(iter(task_config.values())) # get first child
if args.batch_size is not None:
task_config.dataloader.batch_size = args.batch_size
if utils.is_main_process():
print("=" * 80)
print(Config(task_config).dumps())
print("=" * 80)
task_name = task_config.name
|
parser = argparse.ArgumentParser()
parser.add_argument(
"--ckpt_path",
type=str,
help="Path to the trained checkpoint.",
)
parser.add_argument(
"--result_dir",
type=str,
default="eval_results/",
help="Path to the result files.",
)
parser.add_argument("--config", nargs="+", required=True, help="Task configs.")
parser.add_argument(
"--load_results",
action="store_true",
help="Load saved results without model inference. Only for the results without re-formatted.",
)
parser.add_argument(
"--dump_submission_file",
action="store_true",
help="Dump a submission file with a specific format to evaluate on a evaluation server.",
)
parser.add_argument(
"--batch_size", "-B",
type=int,
default=None,
help="Per-device batch size for evaluation. (default: use the value in the config)",
)
logger = get_logger()
def dist_setup():
# Expected to use torchrun
init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
def init(ckpt_path, load_results=False):
if load_results:
logger.info("Skip init model in load_results mode.")
return None, None, None
logger.info("Init (load model, tokenizer, processor) ...")
# create model
model, tokenizer, processor = get_model(ckpt_path)
model.cuda()
logger.info(" -- Init done.")
return model, tokenizer, processor
def eval_single(
model,
tokenizer,
processor,
config_path,
result_dir,
load_results=False,
dump_submission_file=False,
):
task_config = Config(config_path)
task_config = next(iter(task_config.values())) # get first child
if args.batch_size is not None:
task_config.dataloader.batch_size = args.batch_size
if utils.is_main_process():
print("=" * 80)
print(Config(task_config).dumps())
print("=" * 80)
task_name = task_config.name | task = build_task(model, tokenizer, processor, task_config) | 1 | 2023-12-06 14:48:41+00:00 | 4k |
NVlabs/RADIO | radio/hf_model.py | [
{
"identifier": "eradio",
"path": "radio/eradio_model.py",
"snippet": "@register_model\ndef eradio(pretrained=False, **kwargs):\n return fastervit2_large_fullres_ws16(pretrained=pretrained, **kwargs)"
},
{
"identifier": "create_model_from_args",
"path": "radio/radio_model.py",
"snippet": "def create_model_from_args(args) -> nn.Module:\n in_chans = 3\n if args.in_chans is not None:\n in_chans = args.in_chans\n elif args.input_size is not None:\n in_chans = args.input_size[0]\n\n # Skip weight initialization unless it's explicitly requested.\n weight_init = args.model_kwargs.pop(\"weight_init\", \"skip\")\n\n model = create_model(\n args.model,\n pretrained=args.pretrained,\n in_chans=in_chans,\n num_classes=args.num_classes,\n drop_rate=args.drop,\n drop_path_rate=args.drop_path,\n drop_block_rate=args.drop_block,\n global_pool=args.gp,\n bn_momentum=args.bn_momentum,\n bn_eps=args.bn_eps,\n scriptable=args.torchscript,\n checkpoint_path=args.initial_checkpoint,\n weight_init=weight_init,\n **args.model_kwargs,\n )\n\n assert (\n not args.cls_token_per_teacher or args.cpe_max_size is not None\n ), \"CPE must be enabled for multiple CLS tokens!\"\n\n if args.cpe_max_size is not None:\n enable_cpe(\n model,\n args.cpe_max_size,\n num_cls_tokens=len(args.teachers) if args.cls_token_per_teacher else 1,\n register_multiple=args.register_multiple,\n )\n\n return model"
},
{
"identifier": "RADIOModel",
"path": "radio/radio_model.py",
"snippet": "class RADIOModel(nn.Module):\n def __init__(\n self,\n model: nn.Module,\n input_conditioner: InputConditioner,\n return_summary: bool,\n return_spatial_features: bool,\n ):\n super().__init__()\n\n self.model = model\n self.input_conditioner = input_conditioner\n self.return_summary = return_summary\n self.return_spatial_features = return_spatial_features\n\n def forward(self, x: torch.Tensor):\n x = self.input_conditioner(x)\n\n y = self.model.forward_features(x)\n\n if isinstance(y, (list, tuple)):\n summary, all_feat = y\n elif isinstance(self.model, VisionTransformer):\n patch_gen = getattr(self.model, \"patch_generator\", None)\n if patch_gen is not None:\n summary = y[:, : patch_gen.num_cls_tokens].flatten(1)\n all_feat = y[:, patch_gen.num_skip :]\n elif self.model.global_pool == \"avg\":\n summary = y[:, self.model.num_prefix_tokens :].mean(dim=1)\n all_feat = y\n else:\n summary = y[:, 0]\n all_feat = y[:, 1:]\n else:\n raise ValueError(\"Unsupported model type\")\n\n if self.return_summary and self.return_spatial_features:\n return summary, all_feat\n elif self.return_summary:\n return summary\n return all_feat"
},
{
"identifier": "get_default_conditioner",
"path": "radio/input_conditioner.py",
"snippet": "def get_default_conditioner():\n from timm.data.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD\n\n return InputConditioner(\n input_scale=1.0,\n norm_mean=OPENAI_CLIP_MEAN,\n norm_std=OPENAI_CLIP_STD,\n )"
},
{
"identifier": "InputConditioner",
"path": "radio/input_conditioner.py",
"snippet": "class InputConditioner(nn.Module):\n def __init__(self,\n input_scale: float,\n norm_mean: norm_t,\n norm_std: norm_t,\n dtype: torch.dtype = torch.float32,\n ):\n super().__init__()\n\n self.dtype = dtype\n\n # self.input_scale = input_scale\n self.register_buffer(\"norm_mean\", _to_tensor(norm_mean) / input_scale)\n self.register_buffer(\"norm_std\", _to_tensor(norm_std) / input_scale)\n\n def forward(self, x: torch.Tensor):\n # x = x * self.input_scale\n y = (x - self.norm_mean) / self.norm_std\n return y.to(self.dtype)"
}
] | from collections import namedtuple
from typing import Optional
from timm.models import VisionTransformer
from transformers import PretrainedConfig, PreTrainedModel
from .eradio_model import eradio
from .radio_model import create_model_from_args
from .radio_model import RADIOModel as RADIOModelBase
from .input_conditioner import get_default_conditioner, InputConditioner
import torch | 1,776 | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for RADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class RADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for RADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = RADIOConfig
def __init__(self, config):
super().__init__(config)
RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
args = RADIOArgs(**config.args)
self.config = config
model = create_model_from_args(args)
input_conditioner: InputConditioner = get_default_conditioner()
self.radio_model = RADIOModelBase(
model,
input_conditioner,
config.return_summary,
config.return_spatial_features,
)
@property
def model(self) -> VisionTransformer:
return self.radio_model.model
@property
def input_conditioner(self) -> InputConditioner:
return self.radio_model.input_conditioner
def forward(self, x: torch.Tensor):
return self.radio_model.forward(x)
class ERADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for ERADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class ERADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for ERADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = ERADIOConfig
def __init__(self, config):
super().__init__(config)
config.args["in_chans"] = 3
config.args["num_classes"] = 0
config.args["return_full_features"] = config.return_spatial_features
self.config = config
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for RADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class RADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for RADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = RADIOConfig
def __init__(self, config):
super().__init__(config)
RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
args = RADIOArgs(**config.args)
self.config = config
model = create_model_from_args(args)
input_conditioner: InputConditioner = get_default_conditioner()
self.radio_model = RADIOModelBase(
model,
input_conditioner,
config.return_summary,
config.return_spatial_features,
)
@property
def model(self) -> VisionTransformer:
return self.radio_model.model
@property
def input_conditioner(self) -> InputConditioner:
return self.radio_model.input_conditioner
def forward(self, x: torch.Tensor):
return self.radio_model.forward(x)
class ERADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for ERADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class ERADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for ERADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = ERADIOConfig
def __init__(self, config):
super().__init__(config)
config.args["in_chans"] = 3
config.args["num_classes"] = 0
config.args["return_full_features"] = config.return_spatial_features
self.config = config | model = eradio(**config.args) | 0 | 2023-12-08 19:53:01+00:00 | 4k |
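For orientation, the RADIO wrapper in this record splits the backbone tokens into a global summary vector and spatial features. A toy sketch of that split with made-up tensor shapes (not the real backbone):

import torch

B, N, C = 2, 197, 768            # batch, tokens (1 CLS + 196 patches), channels
y = torch.randn(B, N, C)         # stand-in for backbone output
summary = y[:, 0]                # CLS token -> global summary, shape (B, C)
spatial = y[:, 1:]               # patch tokens -> spatial features, shape (B, N - 1, C)
print(summary.shape, spatial.shape)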
taikinman/langrila | src/langrila/utils.py | [
{
"identifier": "_TILE_SIZE",
"path": "src/langrila/model_config.py",
"snippet": "_TILE_SIZE = 512"
},
{
"identifier": "_TOKENS_PER_TILE",
"path": "src/langrila/model_config.py",
"snippet": "_TOKENS_PER_TILE = 170"
},
{
"identifier": "MODEL_CONFIG",
"path": "src/langrila/model_config.py",
"snippet": "MODEL_CONFIG = {}"
}
] | import base64
import io
import math
import os
import numpy as np
import openai
import tiktoken
from typing import Optional, Union
from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
from PIL import Image
from .model_config import _TILE_SIZE, _TOKENS_PER_TILE, MODEL_CONFIG | 2,200 |
def get_n_tokens(message: dict[str, str | list[dict[str, str|dict[str, str]]]] , model_name: str) -> int:
"""
Return the number of tokens used by a list of messages.
Forked and edited from : https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
# print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model_name in MODEL_ZOO:
if model_name == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
else:
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_n_tokens() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
n_content_tokens = 0
n_other_tokens = tokens_per_message
# num_tokens += tokens_per_message
for key, value in message.items():
if key == "content":
if "vision" in model_name and isinstance(value, list):
for item in value: # value type is list[dict[str, str|dict[str, str]]
if item["type"] == "text":
n_content_tokens += len(encoding.encode(item["text"]))
elif item["type"] == "image_url":
n_content_tokens += 85 # Base tokens
if item["image_url"]["detail"] == "high":
if item["image_url"]["url"].startswith("data:image/jpeg;base64,"):
img_encoded = item["image_url"]["url"].replace(
"data:image/jpeg;base64,", ""
)
n_content_tokens += calculate_high_resolution_image_tokens(
decode_image(img_encoded).size
)
elif item["image_url"]["url"].startswith("https://"):
raise NotImplementedError(
"Image URL is not acceptable. Please use base64 encoded image."
)
else:
raise ValueError(f"Unknown type {item['type']} in message['content'].")
else:
n_content_tokens += len(encoding.encode(value))
elif key == "name":
n_other_tokens += tokens_per_name
else:
n_other_tokens += len(encoding.encode(value))
n_other_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
total_tokens = n_content_tokens + n_other_tokens
return {"total": total_tokens, "content": n_content_tokens, "other": n_other_tokens}
def get_token_limit(model_name: str):
if model_name in MODEL_ZOO:
return MODEL_CONFIG[model_name]["max_tokens"]
else:
raise NotImplementedError(
f"get_token_limit() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
def make_batch(iterable, batch_size=1):
length = len(iterable)
for ndx in range(0, length, batch_size):
yield iterable[ndx : min(ndx + batch_size, length)]
def pil2bytes(image: Image.Image) -> bytes:
num_byteio = io.BytesIO()
image.save(num_byteio, format="jpeg")
image_bytes = num_byteio.getvalue()
return image_bytes
def encode_image(image):
if isinstance(image, Image.Image):
image_bytes = pil2bytes(image)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, np.ndarray):
image_pil = Image.fromarray(image)
image_bytes = pil2bytes(image_pil)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, bytes):
return base64.b64encode(image).decode("utf-8")
else:
raise ValueError(f"Type of {type(image)} is not supported for image.")
def decode_image(image_encoded):
image_encoded_utf = image_encoded.encode("utf-8")
image_bytes = base64.b64decode(image_encoded_utf)
byteio = io.BytesIO(image_bytes)
return Image.open(byteio)
def calculate_high_resolution_image_tokens(image_size: tuple[int, int] | list[int, int]):
h, w = image_size
short = min(h, w)
long = max(h, w)
if long > 2048:
short = int(short * 2048 / long)
long = 2048
if short > 768:
long = int(long * 768 / short)
short = 768
n_bins_long = math.ceil(long / _TILE_SIZE)
n_bins_short = math.ceil(short / _TILE_SIZE)
n_tiles = n_bins_long * n_bins_short
|
MODEL_ZOO = set(MODEL_CONFIG.keys())
def get_client(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
api_type: Optional[str] = "openai",
timeout: int = 60,
max_retries: int = 5,
):
if api_type == "azure":
assert (
api_version and endpoint_env_name and deployment_id_env_name
), "api_version, endpoint_env_name, and deployment_id_env_name must be specified when api_type is 'azure'."
return AzureOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
api_version=api_version,
endpoint_env_name=endpoint_env_name,
deployment_id_env_name=deployment_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
elif api_type == "openai":
return OpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
else:
raise ValueError(f"api_type must be 'azure' or 'openai'. Got {api_type}.")
def get_async_client(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
api_type: Optional[str] = "openai",
timeout: int = 60,
max_retries: int = 5,
):
if api_type == "azure":
return AsyncAzureOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
api_version=api_version,
endpoint_env_name=endpoint_env_name,
deployment_id_env_name=deployment_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
elif api_type == "openai":
return AsyncOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
else:
raise ValueError(f"api_type must be 'azure' or 'openai'. Got {api_type}.")
def get_openai_client_settings(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
timeout: int = 60,
max_retries: int = 5,
) -> None:
outputs = {}
outputs["api_key"] = os.getenv(api_key_env_name)
if isinstance(api_version, str):
outputs["api_version"] = api_version
if isinstance(endpoint_env_name, str):
outputs["azure_endpoint"] = os.getenv(endpoint_env_name)
if isinstance(organization_id_env_name, str):
outputs["organization"] = os.getenv(organization_id_env_name)
if isinstance(deployment_id_env_name, str):
outputs["azure_deployment"] = os.getenv(deployment_id_env_name)
outputs["timeout"] = timeout
outputs["max_retries"] = max_retries
return outputs
def set_openai_envs(
api_key_env_name: str,
api_version: Optional[str] = None,
api_type: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
) -> None:
openai.api_key = os.getenv(api_key_env_name)
if isinstance(api_version, str):
openai.api_version = api_version
if isinstance(api_type, str):
openai.api_type = api_type
if isinstance(endpoint_env_name, str):
openai.api_base = os.getenv(endpoint_env_name)
if isinstance(organization_id_env_name, str):
openai.organization = os.getenv(organization_id_env_name)
def get_n_tokens(message: dict[str, str | list[dict[str, str|dict[str, str]]]] , model_name: str) -> int:
"""
Return the number of tokens used by a list of messages.
Forked and edited from : https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
# print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model_name in MODEL_ZOO:
if model_name == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
else:
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_n_tokens() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
n_content_tokens = 0
n_other_tokens = tokens_per_message
# num_tokens += tokens_per_message
for key, value in message.items():
if key == "content":
if "vision" in model_name and isinstance(value, list):
for item in value: # value type is list[dict[str, str|dict[str, str]]
if item["type"] == "text":
n_content_tokens += len(encoding.encode(item["text"]))
elif item["type"] == "image_url":
n_content_tokens += 85 # Base tokens
if item["image_url"]["detail"] == "high":
if item["image_url"]["url"].startswith("data:image/jpeg;base64,"):
img_encoded = item["image_url"]["url"].replace(
"data:image/jpeg;base64,", ""
)
n_content_tokens += calculate_high_resolution_image_tokens(
decode_image(img_encoded).size
)
elif item["image_url"]["url"].startswith("https://"):
raise NotImplementedError(
"Image URL is not acceptable. Please use base64 encoded image."
)
else:
raise ValueError(f"Unknown type {item['type']} in message['content'].")
else:
n_content_tokens += len(encoding.encode(value))
elif key == "name":
n_other_tokens += tokens_per_name
else:
n_other_tokens += len(encoding.encode(value))
n_other_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
total_tokens = n_content_tokens + n_other_tokens
return {"total": total_tokens, "content": n_content_tokens, "other": n_other_tokens}
def get_token_limit(model_name: str):
if model_name in MODEL_ZOO:
return MODEL_CONFIG[model_name]["max_tokens"]
else:
raise NotImplementedError(
f"get_token_limit() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
def make_batch(iterable, batch_size=1):
length = len(iterable)
for ndx in range(0, length, batch_size):
yield iterable[ndx : min(ndx + batch_size, length)]
def pil2bytes(image: Image.Image) -> bytes:
num_byteio = io.BytesIO()
image.save(num_byteio, format="jpeg")
image_bytes = num_byteio.getvalue()
return image_bytes
def encode_image(image):
if isinstance(image, Image.Image):
image_bytes = pil2bytes(image)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, np.ndarray):
image_pil = Image.fromarray(image)
image_bytes = pil2bytes(image_pil)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, bytes):
return base64.b64encode(image).decode("utf-8")
else:
raise ValueError(f"Type of {type(image)} is not supported for image.")
def decode_image(image_encoded):
image_encoded_utf = image_encoded.encode("utf-8")
image_bytes = base64.b64decode(image_encoded_utf)
byteio = io.BytesIO(image_bytes)
return Image.open(byteio)
def calculate_high_resolution_image_tokens(image_size: tuple[int, int] | list[int, int]):
h, w = image_size
short = min(h, w)
long = max(h, w)
if long > 2048:
short = int(short * 2048 / long)
long = 2048
if short > 768:
long = int(long * 768 / short)
short = 768
n_bins_long = math.ceil(long / _TILE_SIZE)
n_bins_short = math.ceil(short / _TILE_SIZE)
n_tiles = n_bins_long * n_bins_short | return _TOKENS_PER_TILE * n_tiles | 1 | 2023-12-10 09:42:35+00:00 | 4k |
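For a concrete feel of calculate_high_resolution_image_tokens in this record, here is a worked example with an assumed 1024 x 4096 input, plugging the same formula with _TILE_SIZE = 512 and _TOKENS_PER_TILE = 170 from the config snippet:

import math

h, w = 1024, 4096
short, long = min(h, w), max(h, w)        # 1024, 4096
if long > 2048:
    short = int(short * 2048 / long)      # 512
    long = 2048
if short > 768:
    long = int(long * 768 / short)
    short = 768
n_tiles = math.ceil(long / 512) * math.ceil(short / 512)   # 4 * 1 = 4
print(170 * n_tiles)   # 680 tile tokens; get_n_tokens adds 85 base tokens for the image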
Open-All-Scale-Causal-Engine/OpenASCE | openasce/attribution/attribution_model.py | [
{
"identifier": "Runtime",
"path": "openasce/core/runtime.py",
"snippet": "class Runtime:\n \"\"\"Runtime Class\n\n Provide the runtime layer to support different running environment, including the single machine or multiple machines.\n\n Attributes:\n\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def launch(\n self, *, num: int = 1, param: Any = None, dataset: Iterable = None\n ) -> List:\n \"\"\"Start the job on current environment\n\n The function is called as the start point of one causal workload and setup the instances according to current environment. Iterable[Tuple[np.ndarray, np.ndarray]]\n\n Arguments:\n\n Returns:\n\n \"\"\"\n # TODO: In distributed environment, launch will setup the environment and submit the job. Then the object of the class needs to be created in workers, and then execute _instance_launch method.\n # self._instance_launch() # For now run in same process for single machine.\n ids = [i for i in range(num)]\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=cpu_count()\n ) as executor:\n to_do = []\n for id in ids:\n futu = executor.submit(self._instance_launch, id, num, param, dataset)\n to_do.append(futu)\n finals = concurrent.futures.as_completed(to_do)\n fi = [f for f in finals]\n results = [f.result() for f in fi]\n return results\n\n def _instance_launch(\n self, idx: int, total_num: int, param: Any, dataset: Iterable\n ) -> Any:\n \"\"\"Running on the instance with multiple cores\n\n Arguments:\n\n Returns:\n\n \"\"\"\n # TODO: Prepare the worker running environment then call todo method, which should be overloaded by sub-class and implement the function.\n logger.info(f\"Begin to execute todo: {idx}/{total_num}\")\n result = self.todo(idx, total_num, param, dataset)\n logger.info(f\"Finish execute todo: {idx}/{total_num}\")\n return result\n\n def todo(self, id: int, total_num: int, param: Any, dataset: Iterable) -> Any:\n \"\"\"Contain the function from the sub-class, and run it in workers\n\n The sub-class should implement this routine and runtime invokes it.\n\n Arguments:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")"
},
{
"identifier": "InferenceModel",
"path": "openasce/inference/inference_model.py",
"snippet": "class InferenceModel(Runtime):\n \"\"\"Inference Class\n\n Base class of the causal inference\n\n Attributes:\n\n \"\"\"\n\n CONDITION_DICT_NAME = \"condition\"\n TREATMENT_VALUE = \"treatment_value\"\n LABEL_VALUE = \"label_value\"\n\n def __init__(self) -> None:\n super().__init__()\n\n @property\n def data(self):\n \"\"\"Return the sample data\"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")\n\n def fit(\n self,\n *,\n X: Iterable[np.ndarray],\n Y: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"Feed the sample data and train the model used to effect on the samples.\n\n Arguments:\n X: Features of the samples.\n Y: Outcomes of the samples.\n T: Treatments of the samples.\n\n Returns:\n None\n \"\"\"\n pass\n\n def estimate(\n self,\n *,\n X: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"Feed the sample data and estimate the effect on the samples\n\n Arguments:\n X: Features of the samples.\n T: Treatments of the samples.\n\n Returns:\n None\n \"\"\"\n pass\n\n def get_result(self) -> Any:\n \"\"\"Get the estimated result\n\n The sub-class should implement this routine and runtime invokes it.\n\n Returns:\n The estimation result.\n \"\"\"\n return self._estimate_result\n\n def output(self, output_path: str) -> None:\n \"\"\"Output the estimated result to files\n\n The sub-class should implement this routine and runtime invokes it.\n\n Arguments:\n output_path: The path of output file.\n\n Returns:\n None\n \"\"\"\n from numpy import savetxt\n\n savetxt(output_path, self.get_result())\n logger.info(f\"Write result to file: {output_path}\")\n\n def _wrap_fit(m):\n @wraps(m)\n def call(self, *, X, Y, T, **kwargs):\n self._prefit(Y, T, X=X, **kwargs)\n # call the wrapped fit method\n m(self, X=X, Y=Y, T=T, **kwargs)\n self._postfit(Y, T, X=X, **kwargs)\n return self\n\n return call"
},
{
"identifier": "logger",
"path": "openasce/utils/logger.py",
"snippet": "GLOBAL_LOGGER_NAME = \"openasce-log\"\nDEFAULT_FORMAT = (\n \"[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d:%(funcName)s] %(message)s\"\n)\nDEFAULT_FORMATTER = logging.Formatter(DEFAULT_FORMAT)\ndef init_custom_logger(name):\nclass openasceLogger(object):"
}
] | import copy
import random
import numpy as np
from typing import Iterable, List
from openasce.core.runtime import Runtime
from openasce.inference.inference_model import InferenceModel
from openasce.utils.logger import logger | 1,936 | # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class Attribution(Runtime):
"""Attribution Class
Attributes:
"""
def __init__(
self, *, threshold: float, max_step: int = 2, top_num: int = None
) -> None:
"""Constructor
Arguments:
threshold: the score threshold
max_step: the maximum number of steps. For attribution based on a causal graph, this is the maximum number of nodes.
top_num: the accepted number of best options in each step, which is used in greedy attribution.
"""
super().__init__()
self._inferencer = None
self._data = None
self._threshold = threshold
self._max_step = max_step
self._top_num = top_num
self._column_names = None
self._treatment_name = None
self._label_name = None
self._label_value = None
self._result = []
@property
def column_names(self):
"""All nodes' name.
Note: should include the treatment node and label node.
"""
assert self._column_names is not None, "column names should be set in advance"
return self._column_names
@column_names.setter
def column_names(self, value: List[str]):
assert self._column_names is None
self._column_names = value
@property
def treatment_name(self):
assert self._treatment_name is not None
return self._treatment_name
@treatment_name.setter
def treatment_name(self, value: str):
assert self._treatment_name is None
self._treatment_name = value
@property
def label_name(self):
assert self._label_name is not None
return self._label_name
@label_name.setter
def label_name(self, value: str):
assert self._label_name is None
self._label_name = value
@property
def label_value(self):
assert self._label_value is not None
return self._label_value
@label_value.setter
def label_value(self, value):
assert self._label_value is None
self._label_value = value
@property
| # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class Attribution(Runtime):
"""Attribution Class
Attributes:
"""
def __init__(
self, *, threshold: float, max_step: int = 2, top_num: int = None
) -> None:
"""Constructor
Arguments:
threshold: the score threshold
max_step: the maximum number of steps. For attribution based on a causal graph, this is the maximum number of nodes.
top_num: the accepted number of best options in each step, which is used in greedy attribution.
"""
super().__init__()
self._inferencer = None
self._data = None
self._threshold = threshold
self._max_step = max_step
self._top_num = top_num
self._column_names = None
self._treatment_name = None
self._label_name = None
self._label_value = None
self._result = []
@property
def column_names(self):
"""All nodes' name.
Note: should include the treatment node and label node.
"""
assert self._column_names is not None, "column names should be set in advance"
return self._column_names
@column_names.setter
def column_names(self, value: List[str]):
assert self._column_names is None
self._column_names = value
@property
def treatment_name(self):
assert self._treatment_name is not None
return self._treatment_name
@treatment_name.setter
def treatment_name(self, value: str):
assert self._treatment_name is None
self._treatment_name = value
@property
def label_name(self):
assert self._label_name is not None
return self._label_name
@label_name.setter
def label_name(self, value: str):
assert self._label_name is None
self._label_name = value
@property
def label_value(self):
assert self._label_value is not None
return self._label_value
@label_value.setter
def label_value(self, value):
assert self._label_value is None
self._label_value = value
@property | def inferencer(self) -> InferenceModel: | 1 | 2023-12-06 05:54:36+00:00 | 4k |
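A short usage sketch of the write-once properties shown above (assumes the Attribution class is importable; node names and values are made up):

attr = Attribution(threshold=0.1, max_step=2, top_num=3)
attr.column_names = ["T", "X1", "X2", "Y"]   # must include the treatment and label nodes
attr.treatment_name = "T"
attr.label_name = "Y"
attr.label_value = 1
# Each setter asserts its backing field is still None, so configure everything once, up front.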
latorc/Wechat-AI-Assistant | chatbot.py | [
{
"identifier": "WcfWrapper",
"path": "wcf_wrapper.py",
"snippet": "class WcfWrapper:\r\n def __init__(self) -> None:\r\n def __del__(self):\r\n def msg_preview_str(self, msg:WxMsg) -> str:\r\n def wxid_to_nickname(self, wxid) -> str:\r\n def wxid_to_wxcode(self, wxid) -> str:\r\n def get_msg(self) -> WxMsg:\r\n def get_msg_text(self, msg:WxMsg) -> str:\r\n def get_content_type(self, msg:WxMsg) -> int:\r\n def get_refer_content(self, msg:WxMsg) -> ChatMsg:\r\n def get_msg_extra(self, msgid:str, sample_extra:str) -> str:\r\n def get_image(self, msgid:str, extra:str) -> str:\r\n def get_video(self, msgid:str, extra:str) -> str:\r\n def send_message(self, chat_msg:ChatMsg, receiver:str, at_list:str=\"\") -> int:\r\n def send_text(self, msg: str, receiver: str, at_list: str = \"\") -> int:\r\n def send_image(self, file:str, receiver:str) -> int:\r\n def send_file(self, file:str, receiver:str) -> int:\r\n def search_msg(self):\r"
},
{
"identifier": "AdminCmd",
"path": "config.py",
"snippet": "class AdminCmd(Enum):\r\n \"\"\" 微信机器人管理员命令, 与配置项目名称对应 \"\"\"\r\n help = auto()\r\n reload_config = auto()\r\n clear_chat = auto()\r\n load_preset = auto()\r\n reset_preset = auto()\r\n list_preset = auto()\r\n chat_id = auto()\r\n \r\n @property\r\n def description(self):\r\n \"\"\" 返回命令的描述说明 \"\"\"\r\n texts = {\r\n AdminCmd.help: \"显示帮助信息\",\r\n AdminCmd.reload_config: \"重新载入配置文件\",\r\n AdminCmd.clear_chat: \"清除当前对话记忆\",\r\n AdminCmd.load_preset: \"预设名 为当前对话载入预设\",\r\n AdminCmd.reset_preset: \"为当前对话清除预设\",\r\n AdminCmd.list_preset: \"列出当前可用预设\",\r\n AdminCmd.chat_id: \"显示当前对话(群聊或单聊)的id\" \r\n }\r\n return texts.get(self, \"\")\r"
},
{
"identifier": "ContentType",
"path": "common.py",
"snippet": "class ContentType(Enum):\r\n \"\"\" 表示用微信发送的消息的类型\"\"\"\r\n text = 1 # 文字\r\n image = 3 # 图片\r\n link = 4 # 链接\r\n file = 6 # 文件\r\n voice = 34 # 语音\r\n video = 43 # 视频\r\n ERROR = 9000 # 错误\r\n UNSUPPORTED = 9001 # 不支持类型\r"
},
{
"identifier": "ChatMsg",
"path": "common.py",
"snippet": "class ChatMsg:\r\n \"\"\" 代表某种类型的消息, 用于内部数据传递 \"\"\"\r\n def __init__(self, type:ContentType, content:str) -> None:\r\n \"\"\" 初始化\r\n Args:\r\n type (ContentType): 附件类型\r\n content (str): 附件内容\r\n \"\"\"\r\n self.type = type\r\n self.content = content\r"
}
] | import queue
import re
import config
import common
import openai_wrapper
import preset
from typing import Tuple
from wcf_wrapper import WcfWrapper, ContentType
from wcferry import WxMsg
from config import AdminCmd
from common import ContentType, ChatMsg
| 2,833 | return self.wcfw.send_message(msg, receiver, at_list)
try:
# apply formatting according to the preset
preset = self.chat_presets.get(receiver, self.config.default_preset)
text = preset.construct_msg(content, self.wcfw.wxid_to_wxcode(msg.sender), self.wcfw.wxid_to_nickname(msg.sender))
# get the quoted message and any attachment
refer_msg = self.wcfw.get_refer_content(msg)
files = []
if refer_msg is None: # 无引用内容
pass
elif refer_msg.type == ContentType.text: # 引用文本
text = text + f"\n(引用文本:\n{refer_msg.content})"
elif refer_msg.type == ContentType.link: # 引用链接
text = text + f"\n(引用链接:\n{refer_msg.content})"
elif refer_msg.type in (ContentType.image, ContentType.file): # 图片, 文件
files.append(refer_msg.content)
elif refer_msg.type == ContentType.voice: # 语音
text += f"\n(语音文件: {refer_msg.content})"
# self.openai_wrapper.run_audio_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.video: # 视频
text += f"\n(视频文件: {refer_msg.content})"
# self.openai_wrapper.run_video_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.ERROR: # 处理错误
self.wcfw.send_text("获取引用内容发生错误", receiver, at_list)
return
else: # 其他
# tp == WxMsgType.UNSUPPORTED
self.wcfw.send_text("抱歉, 不支持引用这类消息", receiver, at_list)
return
# call OpenAI to run the message (blocks until all messages have been processed)
self.openai_wrapper.run_msg(receiver, text, files, callback_msg)
except Exception as e:
common.logger().error("响应消息发生错误: %s", common.error_trace(e))
self.wcfw.send_text(f"对不起, 响应该消息时发生错误: {common.error_info(e)}", receiver, at_list)
def _filter_wxmsg(self, msg:WxMsg) -> str:
""" 判断是否响应这条消息
如果响应, 返回消息原文(去掉前缀)
如果忽略, 返回None
"""
# filter by message type
if msg.type == 1: # 文本
pass
elif msg.type == 34: # 语音
pass
elif msg.type == 49: # 引用/文件/链接? 进一步看content type
ct = self.wcfw.get_content_type(msg)
if ct == 57: # 引用
pass
else:
return None
else:
return None
# filter by message content
content = self.wcfw.get_msg_text(msg).strip()
if msg.from_group(): #群聊消息
# whitelist filtering
if "$all" in self.config.group_whitelist:
pass
else:
if msg.roomid not in self.config.group_whitelist:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
if msg.is_at(self.wcfw.wxid): # @我的消息, 处理
# strip the @ prefix to get the message body
# regex: '@' + any chars (lazy) + \u2005 (quarter-width space), any whitespace, or end of string
content = re.sub(r"@.*?([\u2005\s]|$)", "", content).strip()
return content
else: # 其他情况, 忽略
return None
else: #单聊消息
# WeChat ID whitelist
wxcode = self.wcfw.wxid_to_wxcode(msg.sender)
if "$all" in self.config.single_chat_whitelist:
pass
else:
if wxcode in self.config.single_chat_whitelist:
pass
else:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
# message from the other party:
if not self.config.single_chat_prefix: # 未定义前缀: 响应所有
if msg.type == 34: # 语音
# return None
common.logger().info("转录语音")
audiofile = self.wcfw.wcf.get_audio_msg(msg.id, common.temp_dir())
text = self.openai_wrapper.audio_trans(audiofile)
return text
else:
return content
else:
for p in self.config.single_chat_prefix: # 已定义前缀: 只响应前缀开头的消息
if content.startswith(p):
return content.removeprefix(p).strip()
return None
return None
|
class Chatbot():
""" 管理微信机器人逻辑. 管理与微信客户端 (如Wechat Ferry) 和 AI 客户端 (如 OpenAI )的交互逻辑 """
def __init__(self, config: config.Config, wcfw: WcfWrapper, oaiw: openai_wrapper.OpenAIWrapper) -> None:
""" 初始化
args:
config (Config): Config对象
wcfw (WcfWrapper): Wechat Ferry Wrapper对象
oaiw (OpenAIWrapper): AI Wrapper对象
"""
self.config = config
self.wcfw = wcfw
self.openai_wrapper = oaiw
self.chat_presets:dict[str, preset.Preset] = {} # preset for each chat {roomid or wxid: preset}
def start_main_loop(self) -> None:
"""
Main loop: receive and process WeChat messages.
This function blocks the process.
"""
while self.wcfw.wcf.is_receiving_msg():
try:
msg:WxMsg = self.wcfw.get_msg()
note = f"收到消息 {self.wcfw.msg_preview_str(msg)}"
common.logger().info(note)
except queue.Empty:
continue # no message, keep waiting
except Exception as e:
common.logger().error("接收微信消息错误: %s", common.error_trace(e))
try:
self.run_wxmsg(msg)
except Exception as e:
common.logger().error("处理消息错误:%s", common.error_trace(e))
def run_wxmsg(self, msg:WxMsg):
""" 读取并处理一条消息
args:
msg (WxMsg): 消息对象. 群号: msg.roomid, 发送者微信ID: msg.sender, 消息内容: msg.content
"""
content = self._filter_wxmsg(msg)
if content is None:
return
# determine the reply target
if msg.from_group():
receiver = msg.roomid
if msg.from_self():
at_list = ""
else:
at_list = msg.sender
else: #单聊
receiver = msg.sender
at_list = ""
# if the sender is an admin and the message is a command, handle the command and return immediately
if self.wcfw.wxid_to_wxcode(msg.sender) in self.config.admins:
cmd = self._match_admin_cmd(content)
if cmd:
try:
self.process_admin_cmd(content, receiver, at_list)
except Exception as e:
common.logger().error("执行管理员命令错误: %s",common.error_trace(e))
self.wcfw.send_text(f"执行管理员命令'{content}'发生错误", receiver, at_list)
return
### Call the AI to process the message
# callback for handling messages returned by the AI
def callback_msg(msg:ChatMsg) -> int:
return self.wcfw.send_message(msg, receiver, at_list)
try:
# apply formatting according to the preset
preset = self.chat_presets.get(receiver, self.config.default_preset)
text = preset.construct_msg(content, self.wcfw.wxid_to_wxcode(msg.sender), self.wcfw.wxid_to_nickname(msg.sender))
# get the quoted message and any attachment
refer_msg = self.wcfw.get_refer_content(msg)
files = []
if refer_msg is None: # 无引用内容
pass
elif refer_msg.type == ContentType.text: # 引用文本
text = text + f"\n(引用文本:\n{refer_msg.content})"
elif refer_msg.type == ContentType.link: # 引用链接
text = text + f"\n(引用链接:\n{refer_msg.content})"
elif refer_msg.type in (ContentType.image, ContentType.file): # 图片, 文件
files.append(refer_msg.content)
elif refer_msg.type == ContentType.voice: # 语音
text += f"\n(语音文件: {refer_msg.content})"
# self.openai_wrapper.run_audio_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.video: # 视频
text += f"\n(视频文件: {refer_msg.content})"
# self.openai_wrapper.run_video_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.ERROR: # 处理错误
self.wcfw.send_text("获取引用内容发生错误", receiver, at_list)
return
else: # 其他
# tp == WxMsgType.UNSUPPORTED
self.wcfw.send_text("抱歉, 不支持引用这类消息", receiver, at_list)
return
# call OpenAI to run the message (blocks until all messages have been processed)
self.openai_wrapper.run_msg(receiver, text, files, callback_msg)
except Exception as e:
common.logger().error("响应消息发生错误: %s", common.error_trace(e))
self.wcfw.send_text(f"对不起, 响应该消息时发生错误: {common.error_info(e)}", receiver, at_list)
def _filter_wxmsg(self, msg:WxMsg) -> str:
""" 判断是否响应这条消息
如果响应, 返回消息原文(去掉前缀)
如果忽略, 返回None
"""
# filter by message type
if msg.type == 1: # 文本
pass
elif msg.type == 34: # 语音
pass
elif msg.type == 49: # 引用/文件/链接? 进一步看content type
ct = self.wcfw.get_content_type(msg)
if ct == 57: # 引用
pass
else:
return None
else:
return None
# filter by message content
content = self.wcfw.get_msg_text(msg).strip()
if msg.from_group(): #群聊消息
# whitelist filtering
if "$all" in self.config.group_whitelist:
pass
else:
if msg.roomid not in self.config.group_whitelist:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
if msg.is_at(self.wcfw.wxid): # @我的消息, 处理
# strip the @ prefix to get the message body
# regex: '@' + any chars (lazy) + \u2005 (quarter-width space), any whitespace, or end of string
content = re.sub(r"@.*?([\u2005\s]|$)", "", content).strip()
return content
else: # 其他情况, 忽略
return None
else: #单聊消息
# WeChat ID whitelist
wxcode = self.wcfw.wxid_to_wxcode(msg.sender)
if "$all" in self.config.single_chat_whitelist:
pass
else:
if wxcode in self.config.single_chat_whitelist:
pass
else:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
# message from the other party:
if not self.config.single_chat_prefix: # 未定义前缀: 响应所有
if msg.type == 34: # 语音
# return None
common.logger().info("转录语音")
audiofile = self.wcfw.wcf.get_audio_msg(msg.id, common.temp_dir())
text = self.openai_wrapper.audio_trans(audiofile)
return text
else:
return content
else:
for p in self.config.single_chat_prefix: # 已定义前缀: 只响应前缀开头的消息
if content.startswith(p):
return content.removeprefix(p).strip()
return None
return None
| def _match_admin_cmd(self, content:str) -> Tuple[str, config.AdminCmd]:
| 1 | 2023-12-07 12:17:15+00:00 | 4k |
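A quick check of the @-mention stripping regex used in _filter_wxmsg above (the sample string is made up):

import re

content = "@assistant\u2005please summarize this chat"
cleaned = re.sub(r"@.*?([\u2005\s]|$)", "", content).strip()
print(cleaned)   # -> "please summarize this chat"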
tensorsense/faceflow | params/model.py | [
{
"identifier": "core",
"path": "lib/core.py",
"snippet": "class AUHead(torch.nn.Module):\nclass AUModel(pl.LightningModule):\n def __init__(\n self,\n task: str,\n in_channels: int,\n num_classes: int = 1,\n logits_per_class: int = 1,\n act: torch.nn.Module = torch.nn.Identity(),\n loss: torch.nn.Module = None,\n weight: float = 1.0,\n ):\n def forward(self, x):\n def predict(self, x):\n def __init__(\n self,\n backbone: torch.nn.Module,\n heads_partials: List[partial[AUHead]],\n optimizer_partial: partial = None,\n scheduler_partial: partial = None,\n test_metrics: Dict[str, partial] = None,\n ):\n def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:\n def predict(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:\n def training_step(self, batch, batch_idx) -> STEP_OUTPUT:\n def validation_step(self, batch, batch_idx, dataloader_idx=0) -> STEP_OUTPUT:\n def configure_optimizers(self):\n def lr_scheduler_step(self, scheduler: Scheduler, metric):"
},
{
"identifier": "AUModel",
"path": "lib/core.py",
"snippet": "class AUModel(pl.LightningModule):\n def __init__(\n self,\n backbone: torch.nn.Module,\n heads_partials: List[partial[AUHead]],\n optimizer_partial: partial = None,\n scheduler_partial: partial = None,\n test_metrics: Dict[str, partial] = None,\n ):\n \"\"\"\n Lightning wrapper that encapsulates training and validation workflows.\n :param backbone: timm-compatible feature extractor\n :param heads_partials: partials of AUHead that will be completed with backbone out channels\n :param optimizer_partial: partial of an optimizer that will get model parameters passed into it\n :param scheduler_partial: partial of a scheduled that will get an optimizer passed into it\n :param test_metrics: dict of functional metrics to be used during test phase\n \"\"\"\n super().__init__()\n self.backbone = backbone\n self.head_partials = heads_partials\n self.optimizer_partial = optimizer_partial\n self.scheduler_partial = scheduler_partial\n self.test_metrics = test_metrics\n\n _heads = [\n h(in_channels=self.backbone.feature_info.channels()[-1])\n for h in self.head_partials\n ]\n self.heads: torch.nn.ModuleDict[str, AUHead] = torch.nn.ModuleDict(\n {h.task: h for h in _heads}\n )\n\n print(f\"Backbone reduction: {self.backbone.feature_info.reduction()}\")\n print(f\"Backbone channels: {self.backbone.feature_info.channels()}\")\n\n def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:\n x = self.backbone(x)[-1]\n outs = {task: head(x).squeeze() for task, head in self.heads.items()}\n return outs\n\n def predict(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:\n x = self.backbone(x)[-1]\n outs = {task: head.predict(x).squeeze() for task, head in self.heads.items()}\n return outs\n\n def training_step(self, batch, batch_idx) -> STEP_OUTPUT:\n image = batch[\"img\"]\n x = self(image)\n\n assert set(x.keys()).issubset(\n batch.keys()\n ), f\"Missing gt for pred keys: gt {batch.keys()}, pred {x.keys()}\"\n\n loss = torch.zeros(1, device=self.device)\n for task, head in self.heads.items():\n loss += head.loss(x[task], batch[task]) * head.weight\n\n self.log(\"train_loss\", loss)\n return {\"loss\": loss, \"logits\": x}\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0) -> STEP_OUTPUT:\n image = batch[\"img\"]\n x = self(image)\n\n assert set(x.keys()).issubset(\n batch.keys()\n ), f\"Missing gt for pred keys: gt {batch.keys()}, pred {x.keys()}\"\n\n loss = torch.zeros(1, device=self.device)\n for task, head in self.heads.items():\n pred = x[task]\n if batch[task].shape[1] == pred.shape[1] // 2:\n # Assuming joined sides, merging predictions...\"\n pred = pred.view(pred.shape[0], -1, 2, pred.shape[2])\n pred = pred.max(dim=2).values\n\n loss += head.loss(pred, batch[task]) * head.weight\n\n self.log(\"val_loss\", loss)\n return {\"loss\": loss, \"logits\": x}\n\n def configure_optimizers(self):\n assert (\n self.optimizer_partial is not None\n ), \"Optimizer was not provided on initialization\"\n assert (\n self.scheduler_partial is not None\n ), \"Scheduler was not provided on initialization\"\n\n optimizer = self.optimizer_partial(self.parameters())\n scheduler = self.scheduler_partial(optimizer)\n return [optimizer], [{\"scheduler\": scheduler, \"interval\": \"epoch\"}]\n\n def lr_scheduler_step(self, scheduler: Scheduler, metric):\n scheduler.step(\n epoch=self.current_epoch\n ) # timm's scheduler needs the epoch value"
},
{
"identifier": "DistributionBalancedLoss",
"path": "lib/losses/db.py",
"snippet": "class DistributionBalancedLoss(nn.Module):\n def __init__(\n self,\n reduction=\"mean\",\n pos_counts=None,\n neg_counts=None,\n ):\n super().__init__()\n\n self.reduction = reduction\n self.cls_criterion = binary_cross_entropy_with_logits\n\n # focal loss params\n self.gamma = 2.0\n self.balance_param = 2.0\n\n # mapping function params\n self.map_alpha = 0.1\n self.map_beta = 10.0\n self.map_gamma = 0.2\n\n self.pos_count = torch.from_numpy(pos_counts).float()\n self.neg_count = torch.from_numpy(neg_counts).float()\n self.num_classes = self.pos_count.shape[0]\n self.train_num = self.pos_count[0] + self.neg_count[0]\n\n # regularization params\n self.neg_scale = 2.0\n init_bias = 0.05\n\n self.init_bias = (\n -torch.log(self.train_num / self.pos_count - 1) * init_bias / self.neg_scale\n )\n\n self.freq_inv = torch.ones(self.pos_count.shape) / self.pos_count\n\n def forward(self, cls_score, label):\n cls_score = cls_score.clone()\n weight = self.rebalance_weight(label.float())\n cls_score, weight = self.logit_reg_functions(label.float(), cls_score, weight)\n\n # focal\n logpt = -self.cls_criterion(\n cls_score.clone(), label, weight=None, reduction=\"none\"\n )\n # pt is sigmoid(logit) for pos or sigmoid(-logit) for neg\n pt = torch.exp(logpt)\n loss = self.cls_criterion(\n cls_score, label.float(), weight=weight, reduction=\"none\"\n )\n loss = ((1 - pt) ** self.gamma) * loss\n loss = self.balance_param * loss\n\n # Check reduction option and return loss accordingly\n if self.reduction == \"none\":\n pass\n elif self.reduction == \"mean\":\n loss = loss.mean()\n elif self.reduction == \"sum\":\n self.reduction = loss.sum()\n\n return loss\n\n def logit_reg_functions(self, labels, logits, weight=None):\n self.init_bias = self.init_bias.to(logits)\n logits += self.init_bias\n logits = logits * (1 - labels) * self.neg_scale + logits * labels\n weight = weight / self.neg_scale * (1 - labels) + weight * labels\n return logits, weight\n\n def rebalance_weight(self, gt_labels):\n self.freq_inv = self.freq_inv.to(gt_labels)\n repeat_rate = torch.sum(gt_labels.float() * self.freq_inv, dim=1, keepdim=True)\n pos_weight = self.freq_inv.clone().detach().unsqueeze(0) / repeat_rate\n # pos and neg are equally treated\n weight = (\n torch.sigmoid(self.map_beta * (pos_weight - self.map_gamma))\n + self.map_alpha\n )\n return weight"
},
{
"identifier": "datamodule",
"path": "params/datamodule.py",
"snippet": "TRAIN_LABELED = [\n LocalNaturalDatasetCfg(\n name=\"disfa\",\n root=\"/data\",\n aus=aus,\n crops_dir=\"/data/cropped_images\",\n labels_filename=\"df_proc_tmp_train.csv\",\n )\n]\nTRAIN_UNLABELED = []\nVAL_DATASETS = [\n LocalNaturalDatasetCfg(\n name=\"disfa\",\n root=\"/data\",\n aus=aus,\n crops_dir=\"/data/cropped_images\",\n labels_filename=\"df_proc_tmp_test.csv\",\n )\n]"
}
] | from functools import partial
from timm.scheduler import CosineLRScheduler
from torch.nn import BCEWithLogitsLoss
from torchvision.ops.focal_loss import sigmoid_focal_loss
from lib import core
from lib.core import AUModel
from lib.losses.db import DistributionBalancedLoss
from params.datamodule import datamodule, logits_per_class, num_aus
import timm
import torch | 2,481 |
backbone = timm.create_model(
"convnextv2_nano.fcmae_ft_in22k_in1k", pretrained=True, features_only=True
)
# loss = BCEWithLogitsLoss()
loss = partial(sigmoid_focal_loss, reduction="mean")
# datamodule.setup("fit")
# train_datasets = datamodule.train_dataset.datasets
# pos_counts = sum([ds.pos_counts for ds in train_datasets])
# neg_counts = sum([ds.neg_counts for ds in train_datasets])
# loss = DistributionBalancedLoss(pos_counts=pos_counts, neg_counts=neg_counts)
# def binary_dice_loss(inputs, targets, smooth=1e-4):
# inputs = torch.nn.functional.sigmoid(inputs.squeeze())
# inputs = inputs.view(-1)
# targets = targets.view(-1)
#
# intersection = (inputs * targets).sum()
# dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth)
# return 1 - dice
#
#
# loss = binary_dice_loss
heads_partials = [
partial(
core.AUHead,
task="multilabel",
num_classes=num_aus,
|
backbone = timm.create_model(
"convnextv2_nano.fcmae_ft_in22k_in1k", pretrained=True, features_only=True
)
# loss = BCEWithLogitsLoss()
loss = partial(sigmoid_focal_loss, reduction="mean")
# datamodule.setup("fit")
# train_datasets = datamodule.train_dataset.datasets
# pos_counts = sum([ds.pos_counts for ds in train_datasets])
# neg_counts = sum([ds.neg_counts for ds in train_datasets])
# loss = DistributionBalancedLoss(pos_counts=pos_counts, neg_counts=neg_counts)
# def binary_dice_loss(inputs, targets, smooth=1e-4):
# inputs = torch.nn.functional.sigmoid(inputs.squeeze())
# inputs = inputs.view(-1)
# targets = targets.view(-1)
#
# intersection = (inputs * targets).sum()
# dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth)
# return 1 - dice
#
#
# loss = binary_dice_loss
heads_partials = [
partial(
core.AUHead,
task="multilabel",
num_classes=num_aus, | logits_per_class=logits_per_class, | 1 | 2023-12-05 13:15:58+00:00 | 4k |
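The heads in this record follow a partial-completion pattern: most arguments are fixed with functools.partial and in_channels is filled in later by the model. A toy illustration with torch.nn.Linear standing in for AUHead:

from functools import partial
import torch

head_partial = partial(torch.nn.Linear, out_features=12)   # fixed up front, like num_classes
head = head_partial(in_features=768)                        # supplied later, like in_channels
print(head)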
8none1/idealLED | custom_components/ideal_led/light.py | [
{
"identifier": "IDEALLEDInstance",
"path": "custom_components/ideal_led/idealled.py",
"snippet": "class IDEALLEDInstance:\n def __init__(self, address, reset: bool, delay: int, hass) -> None:\n self.loop = asyncio.get_running_loop()\n self._mac = address\n self._reset = reset\n self._delay = delay\n self._hass = hass\n self._device: BLEDevice | None = None\n self._device = bluetooth.async_ble_device_from_address(self._hass, address)\n if not self._device:\n raise ConfigEntryNotReady(\n f\"You need to add bluetooth integration (https://www.home-assistant.io/integrations/bluetooth) or couldn't find a nearby device with address: {address}\"\n )\n self._connect_lock: asyncio.Lock = asyncio.Lock()\n self._client: BleakClientWithServiceCache | None = None\n self._disconnect_timer: asyncio.TimerHandle | None = None\n self._cached_services: BleakGATTServiceCollection | None = None\n self._expected_disconnect = False\n self._is_on = None\n self._rgb_color = None\n self._brightness = 255\n self._effect = None\n self._effect_speed = 0x64\n self._color_mode = ColorMode.RGB\n self._write_uuid = None\n self._write_colour_uuid = None\n self._read_uuid = None\n self._turn_on_cmd = None\n self._turn_off_cmd = None\n self._model = self._detect_model()\n self._on_update_callbacks = []\n \n LOGGER.debug(\n \"Model information for device %s : ModelNo %s. MAC: %s\",\n self._device.name,\n self._model,\n self._mac,\n )\n\n def _detect_model(self):\n x = 0\n for name in NAME_ARRAY:\n if self._device.name.lower().startswith(name.lower()): # TODO: match on BLE provided model instead of name\n return x\n x = x + 1\n\n async def _write(self, data: bytearray):\n \"\"\"Send command to device and read response.\"\"\"\n await self._ensure_connected()\n cipher = AES.new(SECRET_ENCRYPTION_KEY, AES.MODE_ECB)\n ciphered_data = cipher.encrypt(data)\n await self._write_while_connected(ciphered_data)\n\n async def _write_colour_data(self, data: bytearray):\n \"\"\"Send command to device and read response.\"\"\"\n await self._ensure_connected()\n await self._write_colour_while_connected(data)\n\n async def _write_while_connected(self, data: bytearray):\n LOGGER.debug(f\"Writing data to {self.name}: {data}\")\n await self._client.write_gatt_char(self._write_uuid, data, False)\n \n async def _write_colour_while_connected(self, data: bytearray):\n LOGGER.debug(f\"Writing colour data to {self.name}: {data}\")\n await self._client.write_gatt_char(self._write_colour_uuid, data, False)\n \n def _notification_handler(self, _sender: BleakGATTCharacteristic, data: bytearray) -> None:\n # This doesn't work. I can't get the controller to send notifications.\n \"\"\"Handle BLE notifications from the device. 
Update internal state to reflect the device state.\"\"\"\n LOGGER.debug(\"N: %s: Notification received\", self.name)\n #self.local_callback()\n\n\n @property\n def mac(self):\n return self._device.address\n\n @property\n def reset(self):\n return self._reset\n\n @property\n def name(self):\n return self._device.name\n\n @property\n def rssi(self):\n return self._device.rssi\n\n @property\n def is_on(self):\n return self._is_on\n\n @property\n def brightness(self):\n return self._brightness \n\n @property\n def rgb_color(self):\n return self._rgb_color\n\n @property\n def effect_list(self) -> list[str]:\n return EFFECT_LIST\n\n @property\n def effect(self):\n return self._effect\n \n @property\n def color_mode(self):\n return self._color_mode\n\n @retry_bluetooth_connection_error\n async def set_rgb_color(self, rgb: Tuple[int, int, int], brightness: int | None = None):\n # TODO: Add support for brightness\n self._rgb_color = rgb\n if brightness is None:\n if self._brightness is None:\n self._brightness = 255\n else:\n brightness = self._brightness\n brightness_percent = int(brightness * 100 / 255)\n # Now adjust the RBG values to match the brightness\n red = int(rgb[0] * brightness_percent / 100)\n green = int(rgb[1] * brightness_percent / 100)\n blue = int(rgb[2] * brightness_percent / 100)\n # RGB packet\n rgb_packet = bytearray.fromhex(\"0F 53 47 4C 53 00 00 64 50 1F 00 00 1F 00 00 32\")\n red = int(red >> 3) # You CAN send 8 bit colours to this thing, but you probably shouldn't for power reasons. Thanks to the good folks at Hacker News for that insight.\n green = int(green >> 3)\n blue = int(blue >> 3)\n rgb_packet[9] = red\n rgb_packet[12] = red\n rgb_packet[10] = green\n rgb_packet[13] = green\n rgb_packet[11] = blue\n rgb_packet[14] = blue\n await self._write(rgb_packet) \n\n\n @retry_bluetooth_connection_error\n # effect, reverse=0, speed=50, saturation=50, colour_data=COLOUR_DATA\n async def set_effect(self, effect: str, brightness: int | None = NotImplemented):\n if effect not in EFFECT_LIST:\n LOGGER.error(\"Effect %s not supported\", effect)\n return\n self._effect = effect\n effect_id = EFFECT_MAP.get(effect)\n if effect_id > 11: effect = 11\n packet = bytearray.fromhex(\"0A 4D 55 4C 54 08 00 64 50 07 32 00 00 00 00 00\")\n packet[5] = effect_id\n packet[6] = 0 # reverse\n packet[8] = 50 # speed\n packet[10] = 50 # saturation (brightness?)\n await self._write(packet)\n # Now we send the colour data\n await self.write_colour_data()\n \n @retry_bluetooth_connection_error\n async def write_colour_data(self):\n # This is sent after switching to an effect to tell the device what sort of pattern to show.\n # In the app you can edit this yourself, but HA doesn't have the UI for such a thing\n # so for now I'm just going to hardcode it to a rainbow pattern. 
You could change this to\n # whatever you want, but for an effect the maximum length is 7 colours.\n colour_list = []\n colour_divisions = int(360 / 7)\n for i in range(7):\n h = i * colour_divisions\n r, g, b = colorsys.hsv_to_rgb(h / 360, 1, 1)\n r = int(r * 255)\n g = int(g * 255)\n b = int(b * 255)\n colour_list.append((r, g, b))\n #print(f\"Colour list: {colour_list}\")\n length = len(colour_list)\n colour_data = []\n colour_data.append(length*3) # 3 bytes per colour\n colour_data.append(0) # Don't know what this is, perhaps just a separator\n for colour in colour_list:\n colour_data.append(colour[0])\n colour_data.append(colour[1])\n colour_data.append(colour[2])\n await self._write_colour_data(colour_data)\n\n\n @retry_bluetooth_connection_error\n async def turn_on(self):\n packet = bytearray.fromhex(\"05 54 55 52 4E 01 00 00 00 00 00 00 00 00 00 00\")\n packet[5] = 1\n await self._write(packet)\n self._is_on = True\n\n @retry_bluetooth_connection_error\n async def turn_off(self):\n packet = bytearray.fromhex(\"05 54 55 52 4E 01 00 00 00 00 00 00 00 00 00 00\")\n packet[5] = 0\n await self._write(packet)\n self._is_on = False\n\n @retry_bluetooth_connection_error\n async def update(self):\n LOGGER.debug(\"%s: Update in lwdnetwf called\", self.name)\n try:\n await self._ensure_connected()\n self._is_on = False\n except Exception as error:\n self._is_on = None # failed to connect, this should mark it as unavailable\n LOGGER.error(\"Error getting status: %s\", error)\n track = traceback.format_exc()\n LOGGER.debug(track)\n\n async def _ensure_connected(self) -> None:\n \"\"\"Ensure connection to device is established.\"\"\"\n if self._connect_lock.locked():\n LOGGER.debug(\n \"%s: Connection already in progress, waiting for it to complete\",\n self.name,\n )\n if self._client and self._client.is_connected:\n self._reset_disconnect_timer()\n return\n async with self._connect_lock:\n # Check again while holding the lock\n if self._client and self._client.is_connected:\n self._reset_disconnect_timer()\n return\n LOGGER.debug(\"%s: Connecting\", self.name)\n client = await establish_connection(\n BleakClientWithServiceCache,\n self._device,\n self.name,\n self._disconnected,\n cached_services=self._cached_services,\n ble_device_callback=lambda: self._device,\n )\n LOGGER.debug(\"%s: Connected\", self.name)\n resolved = self._resolve_characteristics(client.services)\n if not resolved:\n # Try to handle services failing to load\n resolved = self._resolve_characteristics(await client.get_services())\n self._cached_services = client.services if resolved else None\n\n self._client = client\n self._reset_disconnect_timer()\n\n # Subscribe to notification is needed for LEDnetWF devices to accept commands\n self._notification_callback = self._notification_handler\n await client.start_notify(self._read_uuid, self._notification_callback)\n LOGGER.debug(\"%s: Subscribed to notifications\", self.name)\n\n\n def _resolve_characteristics(self, services: BleakGATTServiceCollection) -> bool:\n \"\"\"Resolve characteristics.\"\"\"\n for characteristic in NOTIFY_CHARACTERISTIC_UUIDS:\n if char := services.get_characteristic(characteristic):\n self._read_uuid = char\n LOGGER.debug(\"%s: Read UUID: %s\", self.name, self._read_uuid)\n break\n for characteristic in WRITE_CMD_CHARACTERISTIC_UUIDS:\n if char := services.get_characteristic(characteristic):\n self._write_uuid = char\n LOGGER.debug(\"%s: Write UUID: %s\", self.name, self._write_uuid)\n break\n for characteristic in WRITE_COL_CHARACTERISTIC_UUIDS:\n 
if char := services.get_characteristic(characteristic):\n self._write_colour_uuid = char\n LOGGER.debug(\"%s: Write colour UUID: %s\", self.name, self._write_colour_uuid)\n break\n return bool(self._read_uuid and self._write_uuid and self._write_colour_uuid)\n\n def _reset_disconnect_timer(self) -> None:\n \"\"\"Reset disconnect timer.\"\"\"\n if self._disconnect_timer:\n self._disconnect_timer.cancel()\n self._expected_disconnect = False\n if self._delay is not None and self._delay != 0:\n LOGGER.debug(\n \"%s: Configured disconnect from device in %s seconds\",\n self.name,\n self._delay\n )\n self._disconnect_timer = self.loop.call_later(self._delay, self._disconnect)\n\n def _disconnected(self, client: BleakClientWithServiceCache) -> None:\n \"\"\"Disconnected callback.\"\"\"\n if self._expected_disconnect:\n LOGGER.debug(\"%s: Disconnected from device\", self.name)\n return\n LOGGER.warning(\"%s: Device unexpectedly disconnected\", self.name)\n\n def _disconnect(self) -> None:\n \"\"\"Disconnect from device.\"\"\"\n self._disconnect_timer = None\n asyncio.create_task(self._execute_timed_disconnect())\n\n async def stop(self) -> None:\n \"\"\"Stop the LEDBLE.\"\"\"\n LOGGER.debug(\"%s: Stop\", self.name)\n await self._execute_disconnect()\n\n async def _execute_timed_disconnect(self) -> None:\n \"\"\"Execute timed disconnection.\"\"\"\n LOGGER.debug(\n \"%s: Disconnecting after timeout of %s\",\n self.name,\n self._delay\n )\n await self._execute_disconnect()\n\n async def _execute_disconnect(self) -> None:\n \"\"\"Execute disconnection.\"\"\"\n async with self._connect_lock:\n read_char = self._read_uuid\n client = self._client\n self._expected_disconnect = True\n self._client = None\n self._write_uuid = None\n self._read_uuid = None\n if client and client.is_connected:\n await client.stop_notify(read_char) # TODO: I don't think this is needed. Bleak docs say it isnt.\n await client.disconnect()\n LOGGER.debug(\"%s: Disconnected\", self.name)\n \n def local_callback(self):\n # Placeholder to be replaced by a call from light.py\n # I can't work out how to plumb a callback from here to light.py\n return"
},
{
"identifier": "DOMAIN",
"path": "custom_components/ideal_led/const.py",
"snippet": "DOMAIN = \"ideal_led\""
}
] | import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from typing import Any, Optional, Tuple
from .idealled import IDEALLEDInstance
from .const import DOMAIN
from homeassistant.const import CONF_MAC
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.components.light import (
PLATFORM_SCHEMA,
ATTR_BRIGHTNESS,
ATTR_RGB_COLOR,
ATTR_EFFECT,
ColorMode,
LightEntity,
LightEntityFeature,
)
from homeassistant.util.color import match_max_scale
from homeassistant.helpers import device_registry | 3,507 |
LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_MAC): cv.string})
async def async_setup_entry(hass, config_entry, async_add_devices):
|
LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_MAC): cv.string})
async def async_setup_entry(hass, config_entry, async_add_devices): | instance = hass.data[DOMAIN][config_entry.entry_id] | 1 | 2023-12-14 08:01:32+00:00 | 4k |
amirzandieh/HyperAttention | benchmark_single_attention.py | [
{
"identifier": "flash_attn_func",
"path": "src/flash_attn_triton.py",
"snippet": "def _fwd_kernel(\n Q,\n K,\n V,\n Bias,\n Out,\n Lse,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n stride_vn,\n stride_bb,\n stride_bh,\n stride_bm,\n stride_ob,\n stride_oh,\n stride_om,\n nheads,\n seqlen_q,\n seqlen_k,\n seqlen_q_rounded,\n headdim,\n CACHE_KEY_SEQLEN_Q,\n CACHE_KEY_SEQLEN_K,\n BIAS_TYPE: tl.constexpr,\n IS_CAUSAL: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _bwd_preprocess_do_o_dot(\n Out,\n DO,\n Delta,\n stride_ob,\n stride_oh,\n stride_om,\n stride_dob,\n stride_doh,\n stride_dom,\n nheads,\n seqlen_q,\n seqlen_q_rounded,\n headdim,\n BLOCK_M: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n):\ndef _bwd_store_dx(\n dx_ptrs,\n dx,\n offs_n,\n offs_d,\n seqlen,\n headdim,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n even_headdim,\n):\ndef _bwd_kernel_one_col_block(\n start_n,\n Q,\n K,\n V,\n Bias,\n DO,\n DQ,\n DK,\n DV,\n LSE,\n D,\n softmax_scale,\n stride_qm,\n stride_kn,\n stride_vn,\n stride_bm,\n stride_dom,\n stride_dqm,\n stride_dkn,\n stride_dvn,\n seqlen_q,\n seqlen_k,\n headdim,\n ATOMIC_ADD: tl.constexpr,\n BIAS_TYPE: tl.constexpr,\n IS_CAUSAL: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef init_to_zero(name):\ndef _bwd_kernel(\n Q,\n K,\n V,\n Bias,\n DO,\n DQ,\n DK,\n DV,\n LSE,\n D,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n stride_vn,\n stride_bb,\n stride_bh,\n stride_bm,\n stride_dob,\n stride_doh,\n stride_dom,\n stride_dqb,\n stride_dqh,\n stride_dqm,\n stride_dkb,\n stride_dkh,\n stride_dkn,\n stride_dvb,\n stride_dvh,\n stride_dvn,\n nheads,\n seqlen_q,\n seqlen_k,\n seqlen_q_rounded,\n headdim,\n CACHE_KEY_SEQLEN_Q,\n CACHE_KEY_SEQLEN_K,\n BIAS_TYPE: tl.constexpr,\n IS_CAUSAL: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n SEQUENCE_PARALLEL: tl.constexpr,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):\ndef _flash_attn_backward(\n do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None\n):\n def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):\n def backward(ctx, do, dlse_use_needed=None):\n BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)\n BLOCK = 128\n BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)\nclass FlashAttnFunc(torch.autograd.Function):"
},
{
"identifier": "HyperAttention",
"path": "hyper_attention.py",
"snippet": "class HyperAttention(torch.nn.Module):\n\n def __init__(self, input_dim=64, lsh_num_projs=8, block_size=256, sample_size=256, min_seq_len=2048,\n smooth_block=False, **kwargs):\n \"\"\"\n - block_size and sample_size must be divisible by 128\n \"\"\"\n super().__init__()\n self.input_dim = input_dim\n self.lsh_num_projs = lsh_num_projs\n self.block_size = block_size\n self.sample_size = sample_size\n self.min_seq_len = min_seq_len\n self.smooth_block = smooth_block\n self.lsh = AngularLSHTriton(num_projs=self.lsh_num_projs, dim=(1, 1, input_dim))\n\n def forward(self, query: torch.tensor, key: torch.tensor, value: torch.tensor, scale=None, causal=False,\n return_lse=False):\n \"\"\"\n Forward function for HyperAttention. If no causal masking, simply invokes forward_no_causal_mask method.\n If there is causal masking, it partitions the attention matrix and recurses on the partitions.\n inputs:\n - query, key, and valu: must have same sequence lengths but dimension of values vectors can be different\n from that of query or key\n - sequence lengths must be divisible by block_size\n output:\n - attn: (approximation of) the final attention output tensor\n - lse: (approximation of) log sum exp of the qk matrix\n \"\"\"\n query = query.contiguous()\n key = key.contiguous()\n value = value.contiguous()\n\n n_query = query.shape[2]\n batch_size, n_heads, n_key, dim = key.shape\n scale = scale or dim ** (-0.5)\n assert n_query == n_key\n\n # without causal masking\n if causal is False:\n attn, lse = self.forward_no_causal_mask(query, key, value, scale)\n\n else: # with causal masking\n if n_key <= self.min_seq_len:\n attn, lse = flash_attn_func(query.transpose(1, 2),\n key.transpose(1, 2),\n value.transpose(1, 2),\n None, True, scale)\n attn = attn.transpose(1, 2)\n\n else:\n # If n_query is odd we pad inputs by zero rows\n if n_query % 2:\n query = torch.nn.functional.pad(query, (0, 0, 0, 1), mode='constant', value=0.)\n key = torch.nn.functional.pad(key, (0, 0, 0, 1), mode='constant', value=0.)\n value = torch.nn.functional.pad(value, (0, 0, 0, 1), mode='constant', value=0.)\n\n # extract block diagonal parts\n q_bd = query.view(batch_size, 2 * n_heads, query.shape[2] // 2, query.shape[-1])\n k_bd = key.view(batch_size, 2 * n_heads, key.shape[2] // 2, key.shape[-1])\n v_bd = value.view(batch_size, 2 * n_heads, key.shape[2] // 2, value.shape[-1])\n\n attn_bd, lse_bd = self.forward(q_bd, k_bd, v_bd, scale, True, True)\n\n if attn_bd.shape[2] not in attn_bd.stride():\n attn_bd = attn_bd.contiguous()\n attn_bd = attn_bd.view(batch_size, n_heads, -1, dim)\n\n if lse_bd.shape[2] not in lse_bd.stride():\n lse_bd = lse_bd.contiguous()\n lse_bd = lse_bd.view(batch_size, n_heads, -1, 1)\n\n # lowe diagonal block is an unmasked attention\n attn_unmasked, lse_unmasked = self.forward_no_causal_mask(\n query[:, :, key.shape[2] // 2:, :], key[:, :, :key.shape[2] // 2, :],\n value[:, :, :key.shape[2] // 2, :], scale)\n\n attn_up, lse_up = attn_bd[:, :, :query.shape[2] // 2, :], lse_bd[:, :, :query.shape[2] // 2, :]\n attn_down, lse_down = add_self_attentions(attn_bd[:, :, query.shape[2] // 2:, :],\n lse_bd[:, :, query.shape[2] // 2:, :],\n attn_unmasked, lse_unmasked)\n\n attn = torch.cat((attn_up, attn_down), dim=-2)\n lse = torch.cat((lse_up, lse_down), dim=-2)\n\n if n_query % 2:\n attn = attn[:, :, :-1, :]\n lse = lse[:, :, :-1, :]\n\n if not return_lse:\n return attn\n else:\n return attn, lse\n\n def forward_no_causal_mask(self, query, key, value, scale):\n \"\"\"\n - sequence lengths must be 
divisible by block_size\n \"\"\"\n batch_size, head_size, n_query, dim = query.shape\n\n if self.min_seq_len > n_query:\n attn, lse = flash_attn_func(query.transpose(1, 2),\n key.transpose(1, 2),\n value.transpose(1, 2),\n None, False, scale)\n else:\n # Hash keys and queries via SortLSH and obtain buckets\n _, query_sort_idx = torch.sort(self.lsh.hash_triton(query), dim=2, stable=True) # batch_size x head_size x n\n _, key_sort_idx = torch.sort(self.lsh.hash_triton(key), dim=2, stable=True)\n\n # Now run hyper attention function on q,k,v and the permutations\n attn, lse = hyper_attn_func(query.transpose(1, 2),\n key.transpose(1, 2),\n value.transpose(1, 2),\n query_sort_idx.transpose(1, 2),\n key_sort_idx.transpose(1, 2),\n self.block_size,\n self.sample_size,\n scale,\n self.smooth_block,\n )\n attn = attn.transpose(1, 2)\n\n return attn, lse.unsqueeze(-1)"
}
] | import argparse
import torch
import triton
from src.flash_attn_triton import flash_attn_func
from hyper_attention import HyperAttention
from flash_attn import flash_attn_func as flash_attn_func_cuda | 3,431 |
try:
    from flash_attn import flash_attn_func as flash_attn_func_cuda
except ImportError:
    flash_attn_func_cuda = None
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--no_causal", action="store_true")
parser.add_argument("--smooth_block", action="store_true")
parser.add_argument("--mode", type=str, default="fwd+bwd", choices=['fwd', 'bwd', 'fwd+bwd'])
parser.add_argument("--attn_method", type=str, default="flash",
choices=['flash', 'flash-cuda', 'hyper'])
return parser.parse_args()
def get_tensors(batch_size, seq_len, head_size, dim):
q = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
k = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
v = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
return q, k, v
def run_flash_attn(batch_size, head_size, seq_len, dim, causal, mode, impl="triton", warmup=20, rep=100):
q, k, v = get_tensors(batch_size, seq_len, head_size, dim)
if impl == "cuda":
if flash_attn_func_cuda is None:
raise ImportError("Please install flash_attn (pip install flash-attn --no-build-isolation)")
fn = lambda: flash_attn_func_cuda(q, k, v, causal=causal)
else:
fn = lambda: flash_attn_func(q, k, v, None, causal, None)[0]
if mode == 'fwd':
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
elif mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
else: # mode == 'fwd+bwd'
q20_fwd, median_fwd, q80_fwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
q20_bwd, median_bwd, q80_bwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
return q20_fwd + q20_bwd, median_fwd + median_bwd, q80_fwd + q80_bwd
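# Illustrative usage of the helper above (not part of the original file; the numbers are made up):
#   q20, median, q80 = run_flash_attn(batch_size=1, head_size=32, seq_len=4096, dim=64,
#                                      causal=True, mode="fwd")
#   print(f"flash fwd median latency: {median:.3f} ms")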
def run_hyper_attn(batch_size, head_size, seq_len, dim, causal, mode, smooth_block, warmup=20, rep=100):
q, k, v = get_tensors(batch_size, head_size, seq_len, dim)
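    # Note: the positional arguments are intentionally passed as (batch, heads, seq, dim) here, so the
    # tensors come out in the (B, H, S, D) layout HyperAttention expects, whereas run_flash_attn keeps
    # the (B, S, H, D) layout consumed by flash_attn_func.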
block_size = 256
sample_size = 256
|
try:
    from flash_attn import flash_attn_func as flash_attn_func_cuda
except ImportError:
    flash_attn_func_cuda = None
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--no_causal", action="store_true")
parser.add_argument("--smooth_block", action="store_true")
parser.add_argument("--mode", type=str, default="fwd+bwd", choices=['fwd', 'bwd', 'fwd+bwd'])
parser.add_argument("--attn_method", type=str, default="flash",
choices=['flash', 'flash-cuda', 'hyper'])
return parser.parse_args()
def get_tensors(batch_size, seq_len, head_size, dim):
q = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
k = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
v = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
return q, k, v
def run_flash_attn(batch_size, head_size, seq_len, dim, causal, mode, impl="triton", warmup=20, rep=100):
q, k, v = get_tensors(batch_size, seq_len, head_size, dim)
if impl == "cuda":
if flash_attn_func_cuda is None:
raise ImportError("Please install flash_attn (pip install flash-attn --no-build-isolation)")
fn = lambda: flash_attn_func_cuda(q, k, v, causal=causal)
else:
fn = lambda: flash_attn_func(q, k, v, None, causal, None)[0]
if mode == 'fwd':
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
elif mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
else: # mode == 'fwd+bwd'
q20_fwd, median_fwd, q80_fwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
q20_bwd, median_bwd, q80_bwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
return q20_fwd + q20_bwd, median_fwd + median_bwd, q80_fwd + q80_bwd
def run_hyper_attn(batch_size, head_size, seq_len, dim, causal, mode, smooth_block, warmup=20, rep=100):
q, k, v = get_tensors(batch_size, head_size, seq_len, dim)
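    # Note: the positional arguments are intentionally passed as (batch, heads, seq, dim) here, so the
    # tensors come out in the (B, H, S, D) layout HyperAttention expects, whereas run_flash_attn keeps
    # the (B, S, H, D) layout consumed by flash_attn_func.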
block_size = 256
sample_size = 256
| attn = HyperAttention( | 1 | 2023-12-08 21:28:22+00:00 | 4k |
Psivant/femto | femto/md/tests/test_system.py | [
{
"identifier": "CDK2_SYSTEM",
"path": "femto/fe/tests/systems.py",
"snippet": "CDK2_SYSTEM = TestSystem(\n directory=CDK2_DATA_DIR,\n receptor_name=\"cdk2\",\n receptor_coords=CDK2_DATA_DIR / \"cdk2.pdb\",\n receptor_params=None,\n receptor_cavity_mask=\":12,14,16,22,84,87,88,134,146,147 & @CA\",\n receptor_ref_atoms=(\"@1\", \"@2\", \"@3\"),\n ligand_1_name=\"1h1q\",\n ligand_1_coords=CDK2_DATA_DIR / \"1h1q.rst7\",\n ligand_1_params=CDK2_DATA_DIR / \"1h1q.parm7\",\n ligand_1_ref_atoms=(\"@14\", \"@21\", \"@18\"),\n ligand_2_name=\"1oiu\",\n ligand_2_coords=CDK2_DATA_DIR / \"1oiu.rst7\",\n ligand_2_params=CDK2_DATA_DIR / \"1oiu.parm7\",\n ligand_2_ref_atoms=(\"@16\", \"@23\", \"@20\"),\n)"
},
{
"identifier": "TEMOA_SYSTEM",
"path": "femto/fe/tests/systems.py",
"snippet": "TEMOA_SYSTEM = TestSystem(\n directory=TEMOA_DATA_DIR,\n receptor_name=\"temoa\",\n receptor_coords=TEMOA_DATA_DIR / \"temoa.rst7\",\n receptor_params=TEMOA_DATA_DIR / \"temoa.parm7\",\n receptor_cavity_mask=\"@1-40\",\n receptor_ref_atoms=(\"@1\", \"@2\", \"@3\"),\n ligand_1_name=\"g1\",\n ligand_1_coords=TEMOA_DATA_DIR / \"g1.rst7\",\n ligand_1_params=TEMOA_DATA_DIR / \"g1.parm7\",\n ligand_1_ref_atoms=(\"@8\", \"@6\", \"@4\"),\n ligand_2_name=\"g4\",\n ligand_2_coords=TEMOA_DATA_DIR / \"g4.rst7\",\n ligand_2_params=TEMOA_DATA_DIR / \"g4.parm7\",\n ligand_2_ref_atoms=(\"@3\", \"@5\", \"@1\"),\n)"
},
{
"identifier": "LIGAND_1_RESIDUE_NAME",
"path": "femto/md/constants.py",
"snippet": "LIGAND_1_RESIDUE_NAME = \"L1\""
},
{
"identifier": "LIGAND_2_RESIDUE_NAME",
"path": "femto/md/constants.py",
"snippet": "LIGAND_2_RESIDUE_NAME = \"R1\""
},
{
"identifier": "apply_hmr",
"path": "femto/md/system.py",
"snippet": "def apply_hmr(\n system: openmm.System,\n topology: parmed.Structure,\n hydrogen_mass: openmm.unit.Quantity = 1.5 * openmm.unit.amu,\n):\n \"\"\"Apply hydrogen mass repartitioning to a system.\n\n Args:\n system: The system to modify in-place.\n topology: The topology of the system.\n hydrogen_mass: The mass to use ofr hydrogen atoms.\n \"\"\"\n\n for bond in topology.bonds:\n atom_1, atom_2 = bond.atom1, bond.atom2\n\n if atom_1.atomic_number == 1:\n (atom_1, atom_2) = (atom_2, atom_1)\n\n if atom_2.atomic_number != 1:\n continue\n if atom_1.atomic_number == 1:\n continue\n\n elements = sorted(a.atomic_number for a in atom_2.residue.atoms)\n\n if elements == [1, 1, 8]:\n continue\n\n mass_delta = hydrogen_mass - system.getParticleMass(atom_2.idx)\n\n system.setParticleMass(atom_2.idx, hydrogen_mass)\n system.setParticleMass(\n atom_1.idx, system.getParticleMass(atom_1.idx) - mass_delta\n )"
},
{
"identifier": "load_ligand",
"path": "femto/md/system.py",
"snippet": "def load_ligand(\n coord_path: pathlib.Path, param_path: pathlib.Path, residue_name: str | None = None\n) -> parmed.amber.AmberParm:\n \"\"\"Load a ligand from its coordinate and parameter definition.\n\n Args:\n coord_path: The path to the ligand coordinate file (.rst7 / .mol2)\n param_path: The path to the ligand parameter file (.parm7)\n residue_name: The (optional) residue name to assign to the ligand.\n\n Returns:\n The loaded ligand\n \"\"\"\n\n ligand = parmed.amber.AmberParm(str(param_path), str(coord_path))\n\n for residue in ligand.residues:\n residue.name = residue_name\n\n ligand.parm_data[\"RESIDUE_LABEL\"] = [residue_name]\n\n return ligand"
},
{
"identifier": "load_ligands",
"path": "femto/md/system.py",
"snippet": "def load_ligands(\n ligand_1_coords: pathlib.Path,\n ligand_1_params: pathlib.Path,\n ligand_2_coords: pathlib.Path | None,\n ligand_2_params: pathlib.Path | None,\n) -> tuple[parmed.amber.AmberParm, parmed.amber.AmberParm | None]:\n \"\"\"Load the first, and optionally second, ligand from their coordinates and\n parameters.\n\n Args:\n ligand_1_coords: The coordinates of the first ligand.\n ligand_1_params: The parameters of the first ligand.\n ligand_2_coords: The (optional) coordinates of the second ligand.\n ligand_2_params: The (optional) parameters of the second ligand.\n\n Returns:\n The loaded ligands.\n \"\"\"\n\n assert (ligand_2_params is None and ligand_2_coords is None) or (\n ligand_2_params is not None and ligand_2_coords is not None\n ), \"both or neither of ligand_2_coords and ligand_2_params must be provided\"\n\n ligand_1 = load_ligand(ligand_1_coords, ligand_1_params, LIGAND_1_RESIDUE_NAME)\n\n if ligand_2_coords is None:\n return ligand_1, None\n\n ligand_2 = load_ligand(ligand_2_coords, ligand_2_params, LIGAND_2_RESIDUE_NAME)\n\n return ligand_1, ligand_2"
},
{
"identifier": "load_receptor",
"path": "femto/md/system.py",
"snippet": "def load_receptor(\n coord_path: pathlib.Path,\n param_path: pathlib.Path | None,\n tleap_sources: list[str] | None = None,\n) -> parmed.amber.AmberParm:\n \"\"\"Loads a receptor from its coordinates and optionally parameters.\n\n If no parameters are provided, the receptor will be parameterized using tLeap.\n\n Args:\n coord_path: The coordinates of the receptor.\n param_path: The parameters of the receptor.\n tleap_sources: The tLeap sources to use to parameterize the receptor.\n See ``femto.md.config.DEFAULT_TLEAP_SOURCES`` for the defaults.\n\n Returns:\n The loaded receptor.\n \"\"\"\n tleap_sources = (\n femto.md.config.DEFAULT_TLEAP_SOURCES\n if tleap_sources is None\n else tleap_sources\n )\n\n if param_path is not None:\n return parmed.amber.AmberParm(str(param_path), str(coord_path))\n\n receptor = parmed.load_file(str(coord_path), structure=True)\n\n _LOGGER.info(\n f\"no receptor parameters provided, the receptor will parameterize using \"\n f\"tLeap: {tleap_sources}\"\n )\n return femto.md.utils.amber.parameterize_structure(receptor, tleap_sources)"
},
{
"identifier": "build_mock_structure",
"path": "femto/md/tests/mocking.py",
"snippet": "def build_mock_structure(smiles: list[str]) -> parmed.Structure:\n \"\"\"Build a mock structure from a list of SMILES patterns\n\n Notes:\n * A conformer is generated for each molecule.\n\n Args:\n smiles: A list of SMILES patterns.\n\n Returns:\n The mock structure.\n \"\"\"\n molecules = [Chem.MolFromSmiles(pattern) for pattern in smiles]\n\n for molecule, pattern in zip(molecules, smiles, strict=True):\n assert molecule is not None, f\"{pattern} is not a valid SMILES pattern\"\n\n complex = Chem.Mol()\n\n for i, molecule in enumerate(molecules):\n molecule = Chem.AddHs(molecule)\n AllChem.EmbedMolecule(molecule)\n\n is_water = Chem.MolToSmiles(Chem.RemoveHs(molecule)) == \"O\"\n\n residue_name = (\n \"WAT\"\n if is_water\n else (\n f\"{molecule.GetAtomWithIdx(0).GetSymbol()}\"\n if molecule.GetNumAtoms() == 1\n else \"UNK\"\n )\n )\n symbol_count = collections.defaultdict(int)\n\n for atom in molecule.GetAtoms():\n atom_name = f\"{atom.GetSymbol()}{symbol_count[atom.GetSymbol()] + 1}\"\n atom_info = Chem.AtomPDBResidueInfo(\n atom_name.ljust(4, \" \"), atom.GetIdx(), \"\", residue_name, i\n )\n atom.SetMonomerInfo(atom_info)\n\n symbol_count[atom.GetSymbol()] += 1\n\n complex = Chem.CombineMols(complex, molecule)\n\n with tempfile.NamedTemporaryFile(suffix=\".pdb\") as tmp_file:\n Chem.MolToPDBFile(complex, tmp_file.name)\n structure = parmed.load_file(tmp_file.name, structure=True)\n\n return structure"
},
{
"identifier": "is_close",
"path": "femto/md/utils/openmm.py",
"snippet": "def is_close(\n v1: openmm.unit.Quantity,\n v2: openmm.unit.Quantity,\n rtol=1.0e-5,\n atol=1.0e-8,\n equal_nan=False,\n) -> bool | numpy.ndarray:\n \"\"\"Compares if two unit wrapped values are close using ``numpy.is_close``\"\"\"\n\n if not v1.unit.is_compatible(v2.unit):\n return False\n\n return numpy.isclose(\n v1.value_in_unit(v1.unit),\n v2.value_in_unit(v1.unit),\n atol=atol,\n rtol=rtol,\n equal_nan=equal_nan,\n )"
}
] | import openmm
import openmm.unit
import parmed
from femto.fe.tests.systems import CDK2_SYSTEM, TEMOA_SYSTEM
from femto.md.constants import LIGAND_1_RESIDUE_NAME, LIGAND_2_RESIDUE_NAME
from femto.md.system import apply_hmr, load_ligand, load_ligands, load_receptor
from femto.md.tests.mocking import build_mock_structure
from femto.md.utils.openmm import is_close | 3,324 |
def test_hmr():
topology = build_mock_structure(["CC"])
system = openmm.System()
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
original_mass = sum(
[system.getParticleMass(i) for i in range(system.getNumParticles())],
0.0 * openmm.unit.amu,
)
expected_h_mass = 1.5 * openmm.unit.amu
apply_hmr(system, topology, hydrogen_mass=expected_h_mass)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
new_mass = sum(new_masses, 0.0 * openmm.unit.amu)
assert is_close(new_mass, original_mass)
expected_masses = [
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
] + ([expected_h_mass] * 6)
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
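    # Sanity arithmetic for the expected masses above (illustrative, not from the original file):
    # each carbon donates 0.5 amu to each of its 3 hydrogens, so the repartitioned total is
    # 2 * (12.0 - 1.5) + 6 * 1.5 = 30.0 amu, matching the original 2 * 12.0 + 6 * 1.0 = 30.0 amu.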
def test_hmr_water():
"""HMR should not modify water molecules."""
topology = build_mock_structure(["O"])
expected_masses = [16.0, 1.0, 1.0] * openmm.unit.amu
system = openmm.System()
for mass in expected_masses:
system.addParticle(mass)
apply_hmr(system, topology)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
def test_load_ligand():
    residue_name = "ABC"
    ligand = load_ligand(
        CDK2_SYSTEM.ligand_1_coords, CDK2_SYSTEM.ligand_1_params, residue_name
)
assert len(ligand[":ABC"].atoms) == len(ligand.atoms)
def test_load_ligands():
coord_path = CDK2_SYSTEM.ligand_1_coords
param_path = CDK2_SYSTEM.ligand_1_params
ligand_1, ligand_2 = load_ligands(coord_path, param_path, None, None)
assert ligand_2 is None
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
ligand_1, ligand_2 = load_ligands(coord_path, param_path, coord_path, param_path)
assert isinstance(ligand_2, parmed.Structure)
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
assert ligand_2.residues[0].name == LIGAND_2_RESIDUE_NAME
def test_load_receptor_with_params(mocker):
mock_parameterize = mocker.patch(
"femto.md.utils.amber.parameterize_structure", autospec=True
)
|
def test_hmr():
topology = build_mock_structure(["CC"])
system = openmm.System()
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
original_mass = sum(
[system.getParticleMass(i) for i in range(system.getNumParticles())],
0.0 * openmm.unit.amu,
)
expected_h_mass = 1.5 * openmm.unit.amu
apply_hmr(system, topology, hydrogen_mass=expected_h_mass)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
new_mass = sum(new_masses, 0.0 * openmm.unit.amu)
assert is_close(new_mass, original_mass)
expected_masses = [
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
] + ([expected_h_mass] * 6)
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
def test_hmr_water():
"""HMR should not modify water molecules."""
topology = build_mock_structure(["O"])
expected_masses = [16.0, 1.0, 1.0] * openmm.unit.amu
system = openmm.System()
for mass in expected_masses:
system.addParticle(mass)
apply_hmr(system, topology)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
def test_load_ligand():
    residue_name = "ABC"
    ligand = load_ligand(
        CDK2_SYSTEM.ligand_1_coords, CDK2_SYSTEM.ligand_1_params, residue_name
)
assert len(ligand[":ABC"].atoms) == len(ligand.atoms)
def test_load_ligands():
coord_path = CDK2_SYSTEM.ligand_1_coords
param_path = CDK2_SYSTEM.ligand_1_params
ligand_1, ligand_2 = load_ligands(coord_path, param_path, None, None)
assert ligand_2 is None
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
ligand_1, ligand_2 = load_ligands(coord_path, param_path, coord_path, param_path)
assert isinstance(ligand_2, parmed.Structure)
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
assert ligand_2.residues[0].name == LIGAND_2_RESIDUE_NAME
def test_load_receptor_with_params(mocker):
mock_parameterize = mocker.patch(
"femto.md.utils.amber.parameterize_structure", autospec=True
)
| receptor = load_receptor(TEMOA_SYSTEM.receptor_coords, TEMOA_SYSTEM.receptor_params) | 1 | 2023-12-07 15:28:18+00:00 | 4k |
AIFSH/NativeDancer | nativedancer/third_part/detectron2/modeling/backbone/regnet.py | [
{
"identifier": "get_norm",
"path": "nativedancer/third_part/detectron2/layers/batch_norm.py",
"snippet": "def get_norm(norm, out_channels):\n \"\"\"\n Args:\n norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;\n or a callable that takes a channel number and returns\n the normalization layer as a nn.Module.\n\n Returns:\n nn.Module or None: the normalization layer\n \"\"\"\n if norm is None:\n return None\n if isinstance(norm, str):\n if len(norm) == 0:\n return None\n norm = {\n \"BN\": BatchNorm2d,\n # Fixed in https://github.com/pytorch/pytorch/pull/36382\n \"SyncBN\": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,\n \"FrozenBN\": FrozenBatchNorm2d,\n \"GN\": lambda channels: nn.GroupNorm(32, channels),\n # for debugging:\n \"nnSyncBN\": nn.SyncBatchNorm,\n \"naiveSyncBN\": NaiveSyncBatchNorm,\n # expose stats_mode N as an option to caller, required for zero-len inputs\n \"naiveSyncBN_N\": lambda channels: NaiveSyncBatchNorm(channels, stats_mode=\"N\"),\n \"LN\": lambda channels: LayerNorm(channels),\n }[norm]\n return norm(out_channels)"
},
{
"identifier": "ShapeSpec",
"path": "nativedancer/third_part/detectron2/layers/shape_spec.py",
"snippet": "class ShapeSpec:\n \"\"\"\n A simple structure that contains basic shape specification about a tensor.\n It is often used as the auxiliary inputs/outputs of models,\n to complement the lack of shape inference ability among pytorch modules.\n \"\"\"\n\n channels: Optional[int] = None\n height: Optional[int] = None\n width: Optional[int] = None\n stride: Optional[int] = None"
},
{
"identifier": "CNNBlockBase",
"path": "nativedancer/third_part/detectron2/layers/blocks.py",
"snippet": "class CNNBlockBase(nn.Module):\n \"\"\"\n A CNN block is assumed to have input channels, output channels and a stride.\n The input and output of `forward()` method must be NCHW tensors.\n The method can perform arbitrary computation but must match the given\n channels and stride specification.\n\n Attribute:\n in_channels (int):\n out_channels (int):\n stride (int):\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride):\n \"\"\"\n The `__init__` method of any subclass should also contain these arguments.\n\n Args:\n in_channels (int):\n out_channels (int):\n stride (int):\n \"\"\"\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride\n\n def freeze(self):\n \"\"\"\n Make this block not trainable.\n This method sets all parameters to `requires_grad=False`,\n and convert all BatchNorm layers to FrozenBatchNorm\n\n Returns:\n the block itself\n \"\"\"\n for p in self.parameters():\n p.requires_grad = False\n FrozenBatchNorm2d.convert_frozen_batchnorm(self)\n return self"
},
{
"identifier": "Backbone",
"path": "nativedancer/third_part/detectron2/modeling/backbone/backbone.py",
"snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any subclass can specify its own set of arguments.\n \"\"\"\n super().__init__()\n\n @abstractmethod\n def forward(self):\n \"\"\"\n Subclasses must override this method, but adhere to the same return type.\n\n Returns:\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\n \"\"\"\n pass\n\n @property\n def size_divisibility(self) -> int:\n \"\"\"\n Some backbones require the input height and width to be divisible by a\n specific integer. This is typically true for encoder / decoder type networks\n with lateral connection (e.g., FPN) for which feature maps need to match\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\n input size divisibility is required.\n \"\"\"\n return 0\n\n @property\n def padding_constraints(self) -> Dict[str, int]:\n \"\"\"\n This property is a generalization of size_divisibility. Some backbones and training\n recipes require specific padding constraints, such as enforcing divisibility by a specific\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\n in :paper:vitdet). `padding_constraints` contains these optional items like:\n {\n \"size_divisibility\": int,\n \"square_size\": int,\n # Future options are possible\n }\n `size_divisibility` will read from here if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\n \"\"\"\n return {}\n\n def output_shape(self):\n \"\"\"\n Returns:\n dict[str->ShapeSpec]\n \"\"\"\n # this is a backward-compatible default\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }"
}
] | import numpy as np
from torch import nn
from ...layers import CNNBlockBase, ShapeSpec, get_norm
from .backbone import Backbone | 1,873 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Implementation of RegNet models from :paper:`dds` and :paper:`scaling`.
This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications.
Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify
model loading.
"""
__all__ = [
"AnyNet",
"RegNet",
"ResStem",
"SimpleStem",
"VanillaBlock",
"ResBasicBlock",
"ResBottleneckBlock",
]
def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
"""Helper for building a conv2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, g, b = stride, (k - 1) // 2, groups, bias
return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
def gap2d():
"""Helper for building a global average pooling layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def pool2d(k, *, stride=1):
"""Helper for building a pool2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)
def init_weights(m):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
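# Illustrative usage (not part of the original file): this ResNet-style init is typically applied
# recursively with `model.apply(init_weights)` once the network has been constructed.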
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Implementation of RegNet models from :paper:`dds` and :paper:`scaling`.
This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications.
Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify
model loading.
"""
__all__ = [
"AnyNet",
"RegNet",
"ResStem",
"SimpleStem",
"VanillaBlock",
"ResBasicBlock",
"ResBottleneckBlock",
]
def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
"""Helper for building a conv2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, g, b = stride, (k - 1) // 2, groups, bias
return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
def gap2d():
"""Helper for building a global average pooling layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def pool2d(k, *, stride=1):
"""Helper for building a pool2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)
def init_weights(m):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
| class ResStem(CNNBlockBase): | 2 | 2023-12-10 20:14:00+00:00 | 4k |
ethanweber/nerfiller | nerfiller/guidance/multiview_metric.py | [
{
"identifier": "get_images_with_keypoints",
"path": "nerfiller/utils/draw_utils.py",
"snippet": "def get_images_with_keypoints(\n images: Float[Tensor, \"B 3 H W\"],\n keypoints: Float[Tensor, \"B N 2\"],\n colors: Optional[Float[Tensor, \"B N 3\"]] = None,\n keypoint_size: int = 10,\n thickness: int = -1,\n):\n \"\"\"Returns the batch of images with keypoints drawn in the colors.\n Images in range [0, 1].\n Keypoints are (x,y) coordinates in range [-1,1].\n Colors are RGB in range (0, 1).\n \"\"\"\n device = images.device\n b, _, h, w = images.shape\n _, N, _ = keypoints.shape\n\n if colors is None:\n colors = torch.rand((b, N, 3), device=device)\n\n new_images = []\n for idx in range(b):\n im = np.ascontiguousarray(\n (images[idx].permute(1, 2, 0).detach().clone().cpu().numpy() * 255.0).astype(\"uint8\")\n ).astype(\"uint8\")\n\n ke = ((keypoints[idx] * 0.5 + 0.5) * torch.tensor([w, h], device=device)) - 0.5\n ke = [(int(x), int(y)) for x, y in ke]\n co = (colors[idx] * 255.0).detach().clone().cpu().numpy().astype(\"uint8\")\n co = [(int(r), int(g), int(b)) for r, g, b in co]\n im = draw_keypoints_on_image(im, ke, co, radius=keypoint_size, thickness=thickness)\n\n new_images.append(im)\n new_images = np.stack(new_images, axis=0)\n new_images = torch.tensor(new_images).permute(0, 3, 1, 2).float().to(device) / 255.0\n return new_images"
},
{
"identifier": "get_images_with_lines",
"path": "nerfiller/utils/draw_utils.py",
"snippet": "def get_images_with_lines(\n images: Float[Tensor, \"B 3 H W\"],\n lines: Float[Tensor, \"B N 2 2\"],\n colors: Optional[Float[Tensor, \"B N 3\"]] = None,\n line_width: int = 2,\n):\n \"\"\"Returns the batch of images with lines drawn in the colors.\n Images in range [0, 1].\n Lines are [(x,y), (x,y)] coordinates in range [-1,1].\n Colors are RGB in range (0, 1).\n \"\"\"\n device = images.device\n b, _, h, w = images.shape\n _, N, _, _ = lines.shape\n\n if colors is None:\n colors = torch.rand((b, N, 3), device=device)\n\n new_images = []\n for idx in range(b):\n im = np.ascontiguousarray(\n (images[idx].permute(1, 2, 0).detach().clone().cpu().numpy() * 255.0).astype(\"uint8\")\n ).astype(\"uint8\")\n\n li = ((lines[idx] * 0.5 + 0.5) * torch.tensor([w, h], device=device)) - 0.5\n co = (colors[idx] * 255.0).detach().clone().cpu().numpy().astype(\"uint8\")\n co = [(int(r), int(g), int(b)) for r, g, b in co]\n li = [((int(p[0, 0]), int(p[0, 1])), (int(p[1, 0]), int(p[1, 1]))) for p in li]\n im = draw_lines_on_image(im, li, co, thickness=line_width)\n\n new_images.append(im)\n new_images = np.stack(new_images, axis=0)\n new_images = torch.tensor(new_images).permute(0, 3, 1, 2).float().to(device) / 255.0\n return new_images"
},
{
"identifier": "reproject",
"path": "nerfiller/utils/depth_utils.py",
"snippet": "def reproject(from_l: Float[Tensor, \"b H W 2\"], depth, from_K, from_c2w, to_K, to_c2w):\n \"\"\"Reproject from camera 2 into camera 1.\"\"\"\n\n device = from_K.device\n BS, H, W, _ = from_l.shape\n\n K2 = from_K\n c2w2 = from_c2w\n K1 = to_K\n c2w1 = to_c2w\n\n size = torch.tensor([W, H], device=device)\n pts = ((from_l * 0.5 + 0.5) * size).permute(0, 3, 1, 2)\n\n d = depth\n ptsh = torch.cat([pts, torch.ones_like(pts[:, :1])], dim=1) * d\n\n Kinv1 = torch.inverse(K1)\n Kinv2 = torch.inverse(K2)\n c2wh1 = torch.cat([c2w1, torch.tensor([[[0, 0, 0, 1]]], device=device)], dim=1)\n c2wh2 = torch.cat([c2w2, torch.tensor([[[0, 0, 0, 1]]], device=device)], dim=1)\n\n # TODO(ethan): avoid needing to do this\n c2wh1[:, :3, 1:3] *= -1\n c2wh2[:, :3, 1:3] *= -1\n\n w2ch1 = torch.inverse(c2wh1)[:, :3]\n w2ch2 = torch.inverse(c2wh2)[:, :3]\n w2ch1 = torch.cat([w2ch1, torch.tensor([[[0, 0, 0, 1]]], device=device)], dim=1)\n w2ch2 = torch.cat([w2ch2, torch.tensor([[[0, 0, 0, 1]]], device=device)], dim=1)\n\n ptsw = torch.bmm(Kinv2, ptsh.view(BS, 3, -1)).view(BS, 3, H, W)\n ptsw = torch.cat([ptsw, torch.ones_like(ptsw[:, :1])], dim=1)\n ptsw = torch.bmm(c2wh2, ptsw.view(BS, 4, -1)).view(BS, 4, H, W)\n ptsw = torch.bmm(w2ch1, ptsw.view(BS, 4, -1)).view(BS, 4, H, W)\n ptsc = torch.bmm(K1, ptsw.view(BS, 4, -1)[:, :3]).view(BS, 3, H, W)\n\n # non-continuous version\n z = ptsc[0, 2]\n x = (ptsc[0, 0] / z).long()\n y = (ptsc[0, 1] / z).long()\n valid = (x >= 0) & (x < W) & (y >= 0) & (y < H)\n valid_depth = torch.zeros_like(depth[0, 0])\n valid_depth[y[valid], x[valid]] = z[valid]\n\n depth_out = ptsc[:, 2:]\n ptsc = ptsc[:, :2] / depth_out\n\n ptsc = (ptsc.permute(0, 2, 3, 1) / size) * 2 - 1\n\n return ptsc, depth_out, valid_depth"
},
{
"identifier": "get_projection_matrix",
"path": "nerfiller/utils/camera_utils.py",
"snippet": "def get_projection_matrix(K: Float[Tensor, \"B 3 3\"], c2w: Float[Tensor, \"B 3 4\"]) -> Float[Tensor, \"B 3 4\"]:\n batch_size = K.shape[0]\n device = K.device\n\n row = torch.tensor([[[0, 0, 0, 1]]], device=device).repeat(batch_size, 1, 1)\n c2wh = torch.cat([c2w, row], dim=1)\n P = torch.bmm(K, torch.inverse(c2wh)[:, :3])\n return P"
}
] | import mediapy
import torch
from kornia.geometry.epipolar import (
compute_correspond_epilines,
fundamental_from_projections,
)
from kornia.geometry.linalg import point_line_distance
from torchmetrics.functional import (
pairwise_cosine_similarity,
)
from nerfiller.utils.draw_utils import (
get_images_with_keypoints,
get_images_with_lines,
)
from nerfiller.utils.depth_utils import (
reproject,
)
from nerfiller.utils.camera_utils import (
get_projection_matrix,
)
from nerfstudio.utils.colormaps import ColormapOptions, apply_colormap
from nerfiller.utils.typing import * | 2,744 | """
Code for epipolar guidance.
"""
class MultiviewMetric(torch.nn.Module):
"""
Computes multi-view consistency loss.
"""
def __init__(self):
super().__init__()
def forward(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
image1: Float[Tensor, "B 3 Horig Worig"],
image2: Float[Tensor, "B 3 Horig Worig"],
depth1: Optional[Float[Tensor, "B 1 H W"]] = None,
depth2: Optional[Float[Tensor, "B 1 H W"]] = None,
mask1: Optional[Float[Tensor, "B 1 H W"]] = None,
mask2: Optional[Float[Tensor, "B 1 H W"]] = None,
K1: Optional[Float[Tensor, "B 3 3"]] = None,
K2: Optional[Float[Tensor, "B 3 3"]] = None,
c2w1: Optional[Float[Tensor, "B 3 4"]] = None,
c2w2: Optional[Float[Tensor, "B 3 4"]] = None,
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
):
pass
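        # Descriptive note (added): this base class only fixes the shared call signature;
        # concrete metrics such as MatchingMetric below implement the actual computation.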
class MatchingMetric(MultiviewMetric):
"""
Computes a loss to encourage the depth to give good matches.
"""
def __init__(
self,
lossfeatmult: float = 1.0,
lossdistmult: float = 1.0,
sigma_scalar: float = 1.0,
height_scalar: float = 1.0,
keypoint_size: int = 10,
line_width: int = 4,
eps: float = 1e-6,
thresh: float = 0.018,
):
super().__init__()
self.sigma_scalar = sigma_scalar
self.height_scalar = height_scalar
self.lossfeatmult = lossfeatmult
self.lossdistmult = lossdistmult
self.keypoint_size = keypoint_size
self.line_width = line_width
self.eps = eps
self.thresh = thresh
def compute_matches(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
K1: Float[Tensor, "B 3 3"],
K2: Float[Tensor, "B 3 3"],
c2w1: Float[Tensor, "B 3 4"],
c2w2: Float[Tensor, "B 3 4"],
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
):
| """
Code for epipolar guidance.
"""
class MultiviewMetric(torch.nn.Module):
"""
Computes multi-view consistency loss.
"""
def __init__(self):
super().__init__()
def forward(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
image1: Float[Tensor, "B 3 Horig Worig"],
image2: Float[Tensor, "B 3 Horig Worig"],
depth1: Optional[Float[Tensor, "B 1 H W"]] = None,
depth2: Optional[Float[Tensor, "B 1 H W"]] = None,
mask1: Optional[Float[Tensor, "B 1 H W"]] = None,
mask2: Optional[Float[Tensor, "B 1 H W"]] = None,
K1: Optional[Float[Tensor, "B 3 3"]] = None,
K2: Optional[Float[Tensor, "B 3 3"]] = None,
c2w1: Optional[Float[Tensor, "B 3 4"]] = None,
c2w2: Optional[Float[Tensor, "B 3 4"]] = None,
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
):
pass
class MatchingMetric(MultiviewMetric):
"""
Computes a loss to encourage the depth to give good matches.
"""
def __init__(
self,
lossfeatmult: float = 1.0,
lossdistmult: float = 1.0,
sigma_scalar: float = 1.0,
height_scalar: float = 1.0,
keypoint_size: int = 10,
line_width: int = 4,
eps: float = 1e-6,
thresh: float = 0.018,
):
super().__init__()
self.sigma_scalar = sigma_scalar
self.height_scalar = height_scalar
self.lossfeatmult = lossfeatmult
self.lossdistmult = lossdistmult
self.keypoint_size = keypoint_size
self.line_width = line_width
self.eps = eps
self.thresh = thresh
def compute_matches(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
K1: Float[Tensor, "B 3 3"],
K2: Float[Tensor, "B 3 3"],
c2w1: Float[Tensor, "B 3 4"],
c2w2: Float[Tensor, "B 3 4"],
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
): | P1 = get_projection_matrix(K1, c2w1) | 3 | 2023-12-07 19:12:08+00:00 | 4k |
nnanhuang/Customize-it-3D | ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)"
},
{
"identifier": "timestep_embedding",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "SpatialTransformer",
"path": "ldm/modules/attention.py",
"snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False):\n super().__init__()\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim,\n disable_self_attn=disable_self_attn)\n for d in range(depth)]\n )\n\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n for block in self.transformer_blocks:\n x = block(x, context=context)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
}
] | from abc import abstractmethod
from functools import partial
from typing import Iterable
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 2,531 |
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
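        # Shape note (added, descriptive): the spatial grid is pooled away, so an (N, C, H, W)
        # input yields one (output_dim or embed_dim)-dimensional feature vector per example.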
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
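        # Illustrative shape check (not part of the original file):
        #   up = Upsample(channels=64, use_conv=True)
        #   up(th.zeros(1, 64, 8, 8)).shape == (1, 64, 16, 16)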
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
|
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
        self.up = nn.ConvTranspose2d(self.channels, self.out_channels, kernel_size=ks, stride=2)
    def forward(self, x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels | self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) | 3 | 2023-12-14 11:03:35+00:00 | 4k |
jbarrow/mlx-playground | train.py | [
{
"identifier": "Llama",
"path": "llama/model.py",
"snippet": "class Llama(nn.Module):\n def __init__(self, config: ModelArgs) -> None:\n super().__init__()\n\n self.embedding = nn.Embedding(config.vocab_size, config.dims)\n self.attention = [TransformerBlock(config) for _ in range(config.n_layers)]\n self.norm = nn.RMSNorm(config.dims)\n self.out_proj = nn.Linear(config.dims, config.vocab_size, bias=False)\n\n def __call__(self, idx: mx.array):\n mask = nn.MultiHeadAttention.create_additive_causal_mask(idx.shape[1])\n mask = mask.astype(self.embedding.weight.dtype)\n\n x = self.embedding(idx)\n for encoding_layer in self.attention:\n x = encoding_layer(x, mask)\n x = self.norm(x)\n\n return self.out_proj(x)\n\n def loss(self, x, y):\n logits = self(x)\n losses = nn.losses.cross_entropy(logits, y)\n mx.simplify(losses)\n\n return mx.mean(losses)"
},
{
"identifier": "ModelArgs",
"path": "llama/model.py",
"snippet": "class ModelArgs:\n block_size: int = 16\n vocab_size: int = 65\n n_layers: int = 4\n n_heads: int = 8\n dims: int = 256\n intermediate_size: int = 512\n n_local_heads: int = -1\n head_dim: int = 64\n rope_base: float = 10_000\n norm_eps: float = 1e-5\n n_kv_heads: int = 4\n\n def __post_init__(self):\n if self.n_local_heads == -1:\n self.n_local_heads = self.n_heads\n\n # if self.intermediate_size is None:\n # hidden_dim = 4 * self.dims\n # n_hidden = int(2 * hidden_dim / 3)\n # self.intermediate_size = find_multiple(n_hidden, 256)\n\n self.head_dim = self.dims // self.n_heads"
},
{
"identifier": "AdamW",
"path": "llama/optim.py",
"snippet": "class AdamW(Optimizer):\n r\"\"\"Implementation of the AdamW optimizer [1].\n\n Following the above convention, in contrast with [1], we do not use bias\n correction in the first and second moments for AdamW. We update the weights \n with a weight_decay (λ) value:\n\n .. math::\n\n m_{t+1} &= \\beta_1 m_t + (1 - \\beta_1) g_t \\\\\n v_{t+1} &= \\beta_2 v_t + (1 - \\beta_2) g_t^2 \\\\\n \\hat{m}_{t+1} &= \\frac{m_t}{(1 - \\beta_1^t)}\n \\hat{v}_{t+1} &= \\frac{v_t}{(1 - \\beta_1^t)}\n w_{t+1} &= w_t - \\alpha (\\frac{\\hat{m}_{t+1}}{\\sqrt{\\hat{v}_{t+1} + \\epsilon}} + \\lambda w_t)\n\n [1]: Loshchilov, I. and Hutter, F., 2019. Decoupled weight decay \n regularization. ICLR 2019.\n \"\"\"\n\n def __init__(\n self,\n learning_rate: float,\n betas: List[float] = [0.9, 0.999],\n eps: float = 1e-8,\n weight_decay: float = 0.01,\n ):\n super().__init__()\n\n self.learning_rate = learning_rate\n self.betas = betas\n self.eps = eps\n self.weight_decay = weight_decay\n\n def apply_single(\n self, gradient: mx.array, parameter: mx.array, state: OptimizerState\n ):\n \"\"\"Performs the AdamW parameter update and stores :math:`v` and\n :math:`m` in the optimizer state.\"\"\"\n lr = self.learning_rate\n b1, b2 = self.betas\n eps = self.eps\n wd = self.weight_decay\n\n m = state.get(\"m\", gradient)\n v = state.get(\"v\", mx.square(gradient))\n t = state.get(\"t\", 1)\n m = b1 * m + (1 - b1) * gradient\n v = b2 * v + (1 - b2) * mx.square(gradient)\n state[\"m\"] = m\n state[\"v\"] = v\n state[\"t\"] = t + 1\n\n m_hat = m / (1. - b1 ** t)\n v_hat = v / (1. - b2 ** t)\n\n return parameter - lr * (m_hat / (mx.sqrt(v_hat) + eps) + wd * parameter)"
}
] | from llama.model import Llama, ModelArgs
from llama.optim import AdamW
from mlx.utils import tree_flatten
from tqdm import tqdm
import mlx.optimizers as optim
import mlx.core as mx
import mlx.nn as nn | 1,921 | """
Super simple train.py, getting started without any tokenizers,
and with a very simple training loop.
"""
lines = open("./data/example.txt", "r").read()
vocab = sorted(list(set(lines)))
itos = {i: ch for i, ch in enumerate(vocab)}
stoi = {ch: i for i, ch in enumerate(vocab)}
CONFIG = {
"context_length": 16,
"batch_size": 32,
"steps": 1000,
"learning_rate": 0.001,
}
def encode(s):
return [stoi[ch] for ch in s]
def decode(l):
return "".join([itos[i] for i in l])
def get_batches(
data: mx.array, split: str, batch_size: int, context_window: int, config=CONFIG
) -> tuple[mx.array, mx.array]:
train = data[: int(0.8 * len(data))]
val = data[int(0.8 * len(data)) : int(0.9 * len(data))]
test = data[int(0.9 * len(data)) :]
batch_data = train
if split == "val":
batch_data = val
if split == "test":
batch_data = test
ixs = mx.random.randint(
0, batch_data.shape[0] - context_window - 1, shape=(batch_size,)
).tolist()
# create B x C tensors of x and y
x = mx.concatenate(
[mx.expand_dims(batch_data[ix : ix + context_window], 0) for ix in ixs], axis=0
)
y = mx.concatenate(
[mx.expand_dims(batch_data[ix + 1 : ix + context_window + 1], 0) for ix in ixs],
axis=0,
)
return x, y
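# Shape note (illustrative): with the CONFIG above (batch_size=32, context_length=16),
# x and y are both (32, 16) integer arrays, y being x shifted one character ahead.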
def evaluate_loss(model, config=CONFIG) -> dict[str, float]:
out = {}
mx.eval(model.parameters())
for split in ["train", "val"]:
losses = []
for _ in range(10):
xb, yb = get_batches(
dataset, split, config["batch_size"], config["context_length"], config
)
loss = model.loss(xb, yb)
losses.append(loss.item())
out[split] = mx.mean(mx.array(losses)).item()
return out
def train(model: nn.Module, optimizer, config=CONFIG):
losses = []
loss_and_grad_fn = nn.value_and_grad(model, model.loss)
pbar = tqdm(range(config["steps"]))
for step in pbar:
xs, ys = get_batches(
dataset, "train", config["batch_size"], config["context_length"]
)
loss, grads = loss_and_grad_fn(xs, ys)
model.update(optimizer.apply_gradients(grads, model))
mx.simplify(loss, model.parameters())
# mx.eval(loss, model.parameters())
losses.append(loss.item())
pbar.set_description(f"loss: ({loss.item():.2f})")
print(evaluate_loss(model))
if __name__ == "__main__":
dataset = mx.array(encode(lines))
args = ModelArgs()
model = Llama(args)
nparams = sum(x.size for k, x in tree_flatten(model.parameters()))
print(f"training a model with {nparams} trainable params")
| """
Super simple train.py, getting started without any tokenizers,
and with a very simple training loop.
"""
lines = open("./data/example.txt", "r").read()
vocab = sorted(list(set(lines)))
itos = {i: ch for i, ch in enumerate(vocab)}
stoi = {ch: i for i, ch in enumerate(vocab)}
CONFIG = {
"context_length": 16,
"batch_size": 32,
"steps": 1000,
"learning_rate": 0.001,
}
def encode(s):
return [stoi[ch] for ch in s]
def decode(l):
return "".join([itos[i] for i in l])
def get_batches(
data: mx.array, split: str, batch_size: int, context_window: int, config=CONFIG
) -> tuple[mx.array, mx.array]:
train = data[: int(0.8 * len(data))]
val = data[int(0.8 * len(data)) : int(0.9 * len(data))]
test = data[int(0.9 * len(data)) :]
batch_data = train
if split == "val":
batch_data = val
if split == "test":
batch_data = test
ixs = mx.random.randint(
0, batch_data.shape[0] - context_window - 1, shape=(batch_size,)
).tolist()
# create B x C tensors of x and y
x = mx.concatenate(
[mx.expand_dims(batch_data[ix : ix + context_window], 0) for ix in ixs], axis=0
)
y = mx.concatenate(
[mx.expand_dims(batch_data[ix + 1 : ix + context_window + 1], 0) for ix in ixs],
axis=0,
)
return x, y
def evaluate_loss(model, config=CONFIG) -> dict[str, float]:
out = {}
mx.eval(model.parameters())
for split in ["train", "val"]:
losses = []
for _ in range(10):
xb, yb = get_batches(
dataset, split, config["batch_size"], config["context_length"], config
)
loss = model.loss(xb, yb)
losses.append(loss.item())
out[split] = mx.mean(mx.array(losses)).item()
return out
def train(model: nn.Module, optimizer, config=CONFIG):
losses = []
loss_and_grad_fn = nn.value_and_grad(model, model.loss)
pbar = tqdm(range(config["steps"]))
for step in pbar:
xs, ys = get_batches(
dataset, "train", config["batch_size"], config["context_length"]
)
loss, grads = loss_and_grad_fn(xs, ys)
model.update(optimizer.apply_gradients(grads, model))
mx.simplify(loss, model.parameters())
# mx.eval(loss, model.parameters())
losses.append(loss.item())
pbar.set_description(f"loss: ({loss.item():.2f})")
print(evaluate_loss(model))
if __name__ == "__main__":
dataset = mx.array(encode(lines))
args = ModelArgs()
model = Llama(args)
nparams = sum(x.size for k, x in tree_flatten(model.parameters()))
print(f"training a model with {nparams} trainable params")
| optimizer = AdamW( | 2 | 2023-12-06 13:31:42+00:00 | 4k |
TaoHuang13/diffusion_reward | diffusion_reward/models/reward_models/diffusion_reward.py | [
{
"identifier": "build_model",
"path": "diffusion_reward/models/video_models/vqdiffusion/modeling/build.py",
"snippet": "def build_model(config, args=None):\n return instantiate_from_config(config['model'])"
},
{
"identifier": "index_to_log_onehot",
"path": "diffusion_reward/models/video_models/vqdiffusion/modeling/transformers/diffusion_transformer.py",
"snippet": "def index_to_log_onehot(x, num_classes):\n assert x.max().item() < num_classes, \\\n f'Error: {x.max().item()} >= {num_classes}'\n x_onehot = F.one_hot(x, num_classes)\n permute_order = (0, -1) + tuple(range(1, len(x.size())))\n x_onehot = x_onehot.permute(permute_order)\n log_x = torch.log(x_onehot.float().clamp(min=1e-30))\n return log_x"
},
{
"identifier": "log_categorical",
"path": "diffusion_reward/models/video_models/vqdiffusion/modeling/transformers/diffusion_transformer.py",
"snippet": "def log_categorical(log_x_start, log_prob):\n return (log_x_start.exp() * log_prob).sum(dim=1)"
},
{
"identifier": "log_onehot_to_index",
"path": "diffusion_reward/models/video_models/vqdiffusion/modeling/transformers/diffusion_transformer.py",
"snippet": "def log_onehot_to_index(log_x):\n return log_x.argmax(1)"
},
{
"identifier": "sum_except_batch",
"path": "diffusion_reward/models/video_models/vqdiffusion/modeling/transformers/diffusion_transformer.py",
"snippet": "def sum_except_batch(x, num_dims=1):\n return x.reshape(*x.shape[:num_dims], -1).sum(-1)"
},
{
"identifier": "load_yaml_config",
"path": "diffusion_reward/models/video_models/vqdiffusion/utils/io.py",
"snippet": "def load_yaml_config(path):\n with open(path) as f:\n config = yaml.full_load(f)\n return config"
},
{
"identifier": "get_model_parameters_info",
"path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py",
"snippet": "def get_model_parameters_info(model):\n # for mn, m in model.named_modules():\n parameters = {'overall': {'trainable': 0, 'non_trainable': 0, 'total': 0}}\n for child_name, child_module in model.named_children():\n parameters[child_name] = {'trainable': 0, 'non_trainable': 0}\n for pn, p in child_module.named_parameters():\n if p.requires_grad:\n parameters[child_name]['trainable'] += p.numel()\n else:\n parameters[child_name]['non_trainable'] += p.numel()\n parameters[child_name]['total'] = parameters[child_name]['trainable'] + parameters[child_name]['non_trainable']\n \n parameters['overall']['trainable'] += parameters[child_name]['trainable']\n parameters['overall']['non_trainable'] += parameters[child_name]['non_trainable']\n parameters['overall']['total'] += parameters[child_name]['total']\n \n # format the numbers\n def format_number(num):\n K = 2**10\n M = 2**20\n G = 2**30\n if num > G: # K\n uint = 'G'\n num = round(float(num)/G, 2)\n elif num > M:\n uint = 'M'\n num = round(float(num)/M, 2)\n elif num > K:\n uint = 'K'\n num = round(float(num)/K, 2)\n else:\n uint = ''\n \n return '{}{}'.format(num, uint)\n \n def format_dict(d):\n for k, v in d.items():\n if isinstance(v, dict):\n format_dict(v)\n else:\n d[k] = format_number(v)\n \n format_dict(parameters)\n return parameters"
}
] | import os
import hydra
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from pathlib import Path
from ..video_models.vqdiffusion.modeling.build import build_model
from ..video_models.vqdiffusion.modeling.transformers.diffusion_transformer import (
index_to_log_onehot, log_categorical, log_onehot_to_index,
sum_except_batch)
from ..video_models.vqdiffusion.utils.io import load_yaml_config
from ..video_models.vqdiffusion.utils.misc import get_model_parameters_info | 2,296 | self.use_std = cfg.use_std
if self.use_std:
stat_path = str(Path(__file__).parents[3]) + cfg.stat_path
with open(stat_path, 'r') as file:
self.stat = yaml.safe_load(file)[cfg.task_name][cfg.skip_step]
# build exploration reward model
self.use_expl_reward = cfg.use_expl_reward
if self.use_expl_reward:
cfg.expl_reward.obs_shape = cfg.obs_shape
cfg.expl_reward.action_shape = cfg.action_shape
self.expl_reward = hydra.utils.instantiate(cfg.expl_reward)
self.expl_scale = cfg.expl_scale
def get_model(self, ema, model_path, config_path):
if 'OUTPUT' in model_path: # pretrained model
model_name = model_path.split(os.path.sep)[-3]
else:
model_name = os.path.basename(config_path).replace('.yaml', '')
config = load_yaml_config(config_path)
model = build_model(config)
model_parameters = get_model_parameters_info(model)
print(model_parameters)
if os.path.exists(model_path):
ckpt = torch.load(model_path, map_location="cpu")
if 'last_epoch' in ckpt:
epoch = ckpt['last_epoch']
elif 'epoch' in ckpt:
epoch = ckpt['epoch']
else:
epoch = 0
missing, unexpected = model.load_state_dict(ckpt["model"], strict=False)
print('Model missing keys:\n', missing)
print('Model unexpected keys:\n', unexpected)
if ema==True and 'ema' in ckpt:
print("Evaluate EMA model")
ema_model = model.get_ema_model()
missing, unexpected = ema_model.load_state_dict(ckpt['ema'], strict=False)
else:
epoch = None
return {'model': model, 'epoch': epoch, 'model_name': model_name, 'parameter': model_parameters}
def imgs_to_batch(self, x, reward_type='entropy'):
'''
input:
imgs: B * T * H * W * C
(mostly): 1 * T * ...
'''
assert x.max() <= 1
# preprocessing
seq_len = x.shape[1]
num_frames = self.model.cfg.params['condition_emb_config']['params']['num_cond_frames'] + 1
n_skip = self.model.frame_skip
subseq_len = (num_frames - 1) * n_skip
        x = x.permute(0, 1, 4, 2, 3)
_, indices = self.model.content_codec.encode_to_z(x)
assert indices.shape[0] == 1
indices = indices.reshape(indices.shape[0], seq_len, -1)
if reward_type == 'entropy':
# only return conditional frames
post_idxes = list(range(seq_len - subseq_len + n_skip))
batch_indices = [indices[:, idx:idx+subseq_len:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames - 1)) for idx in range(subseq_len-n_skip)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
elif reward_type == 'likelihood':
# return conditional frames + current frame
post_idxes = list(range(seq_len - subseq_len))
batch_indices = [indices[:, idx:idx+subseq_len+n_skip:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames)) for idx in range(subseq_len)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
else:
raise NotImplementedError
x = x.flatten(0, 1)
cont = {'content_token': indices[0]}
return cont, cond, indices[0]
@torch.no_grad()
def calc_reward(self, imgs):
self.model.eval()
content, condition, _ = self.imgs_to_batch(imgs, reward_type=self.reward_type)
content_token = content['content_token']
condition_token = condition['condition_token']
rewards = self.calc_vlb(content_token, condition_token)
if self.use_std:
rewards_std = (rewards - self.stat[0]) / self.stat[1]
scaled_rewards = (1 - self.expl_scale) * rewards_std
return scaled_rewards
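    # Usage sketch (illustrative; argument layout follows the imgs_to_batch docstring above):
    #   rm = DiffusionReward(cfg)
    #   rewards = rm.calc_reward(imgs)  # imgs: 1 x T x H x W x C, values in [0, 1]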
@torch.no_grad()
def calc_vlb(self, cont_emb, cond_emb):
x = cont_emb
b, device = x.size(0), x.device
transformer = self.model.transformer
cond_emb = transformer.condition_emb(cond_emb).float()
# t=0
start_step = transformer.num_timesteps
x_start = x
t = torch.full((b,), start_step-1, device=device, dtype=torch.long)
|
class DiffusionReward(nn.Module):
def __init__(self, cfg):
super(DiffusionReward, self).__init__()
# load video models
self.info = self.get_model(ema=True, model_path=cfg.ckpt_path, config_path=cfg.cfg_path)
self.model = self.info['model']
self.epoch = self.info['epoch']
self.model_name = self.info['model_name']
# self.model = self.model.cuda()
self.model.eval()
for param in self.model.parameters():
param.requires_grad = False
# set attribute
for attr_name, attr_value in cfg.items():
setattr(self, attr_name, attr_value)
# standardization
self.use_std = cfg.use_std
if self.use_std:
stat_path = str(Path(__file__).parents[3]) + cfg.stat_path
with open(stat_path, 'r') as file:
self.stat = yaml.safe_load(file)[cfg.task_name][cfg.skip_step]
# build exploration reward model
self.use_expl_reward = cfg.use_expl_reward
if self.use_expl_reward:
cfg.expl_reward.obs_shape = cfg.obs_shape
cfg.expl_reward.action_shape = cfg.action_shape
self.expl_reward = hydra.utils.instantiate(cfg.expl_reward)
self.expl_scale = cfg.expl_scale
def get_model(self, ema, model_path, config_path):
if 'OUTPUT' in model_path: # pretrained model
model_name = model_path.split(os.path.sep)[-3]
else:
model_name = os.path.basename(config_path).replace('.yaml', '')
config = load_yaml_config(config_path)
model = build_model(config)
model_parameters = get_model_parameters_info(model)
print(model_parameters)
if os.path.exists(model_path):
ckpt = torch.load(model_path, map_location="cpu")
if 'last_epoch' in ckpt:
epoch = ckpt['last_epoch']
elif 'epoch' in ckpt:
epoch = ckpt['epoch']
else:
epoch = 0
missing, unexpected = model.load_state_dict(ckpt["model"], strict=False)
print('Model missing keys:\n', missing)
print('Model unexpected keys:\n', unexpected)
if ema==True and 'ema' in ckpt:
print("Evaluate EMA model")
ema_model = model.get_ema_model()
missing, unexpected = ema_model.load_state_dict(ckpt['ema'], strict=False)
else:
epoch = None
return {'model': model, 'epoch': epoch, 'model_name': model_name, 'parameter': model_parameters}
def imgs_to_batch(self, x, reward_type='entropy'):
'''
input:
imgs: B * T * H * W * C
(mostly): 1 * T * ...
'''
assert x.max() <= 1
# preprocessing
seq_len = x.shape[1]
num_frames = self.model.cfg.params['condition_emb_config']['params']['num_cond_frames'] + 1
n_skip = self.model.frame_skip
subseq_len = (num_frames - 1) * n_skip
        x = x.permute(0, 1, 4, 2, 3)
_, indices = self.model.content_codec.encode_to_z(x)
assert indices.shape[0] == 1
indices = indices.reshape(indices.shape[0], seq_len, -1)
if reward_type == 'entropy':
# only return conditional frames
post_idxes = list(range(seq_len - subseq_len + n_skip))
batch_indices = [indices[:, idx:idx+subseq_len:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames - 1)) for idx in range(subseq_len-n_skip)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
elif reward_type == 'likelihood':
# return conditional frames + current frame
post_idxes = list(range(seq_len - subseq_len))
batch_indices = [indices[:, idx:idx+subseq_len+n_skip:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames)) for idx in range(subseq_len)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
else:
raise NotImplementedError
x = x.flatten(0, 1)
cont = {'content_token': indices[0]}
return cont, cond, indices[0]
@torch.no_grad()
def calc_reward(self, imgs):
self.model.eval()
content, condition, _ = self.imgs_to_batch(imgs, reward_type=self.reward_type)
content_token = content['content_token']
condition_token = condition['condition_token']
rewards = self.calc_vlb(content_token, condition_token)
if self.use_std:
rewards_std = (rewards - self.stat[0]) / self.stat[1]
scaled_rewards = (1 - self.expl_scale) * rewards_std
return scaled_rewards
@torch.no_grad()
def calc_vlb(self, cont_emb, cond_emb):
x = cont_emb
b, device = x.size(0), x.device
transformer = self.model.transformer
cond_emb = transformer.condition_emb(cond_emb).float()
# t=0
start_step = transformer.num_timesteps
x_start = x
t = torch.full((b,), start_step-1, device=device, dtype=torch.long) | log_x_start = index_to_log_onehot(x_start, transformer.num_classes) | 1 | 2023-12-05 02:42:28+00:00 | 4k |
mkang315/ASF-YOLO | utils/loss.py | [
{
"identifier": "bbox_iou",
"path": "utils/metrics.py",
"snippet": "def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, EIoU=False, eps=1e-7):\n # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)\n\n # Get the coordinates of bounding boxes\n if xywh: # transform from xywh to xyxy\n (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)\n w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2\n b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_\n b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_\n else: # x1, y1, x2, y2 = box1\n b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)\n b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)\n w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps)\n w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps)\n\n # Intersection area\n inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \\\n (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0)\n\n # Union Area\n union = w1 * h1 + w2 * h2 - inter + eps\n\n # IoU\n iou = inter / union\n if GIoU or DIoU or CIoU or EIoU:\n cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width\n ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height\n if CIoU or DIoU or EIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared\n rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2\n if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)\n with torch.no_grad():\n alpha = v / (v - iou + (1 + eps))\n return iou - (rho2 / c2 + v * alpha) # CIoU\n elif EIoU:\n rho_w2 = ((b2_x2 - b2_x1) - (b1_x2 - b1_x1)) ** 2\n rho_h2 = ((b2_y2 - b2_y1) - (b1_y2 - b1_y1)) ** 2\n cw2 = cw ** 2 + eps\n ch2 = ch ** 2 + eps\n return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2)\n return iou - rho2 / c2 # DIoU\n c_area = cw * ch + eps # convex area\n return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf\n return iou # IoU"
},
{
"identifier": "de_parallel",
"path": "utils/torch_utils.py",
"snippet": "def de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model"
}
] | import torch
import torch.nn as nn
from utils.metrics import bbox_iou
from utils.torch_utils import de_parallel | 2,790 | super().__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class ComputeLoss:
sort_obj_iou = False
# Compute losses
def __init__(self, model, autobalance=False):
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
m = de_parallel(model).model[-1] # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
self.na = m.na # number of anchors
self.nc = m.nc # number of classes
self.nl = m.nl # number of layers
self.anchors = m.anchors
self.device = device
def __call__(self, p, targets): # predictions, targets
lcls = torch.zeros(1, device=self.device) # class loss
lbox = torch.zeros(1, device=self.device) # box loss
lobj = torch.zeros(1, device=self.device) # object loss
tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj
n = b.shape[0] # number of targets
if n:
# pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0
pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions
# Regression
pxy = pxy.sigmoid() * 2 - 0.5
pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box
| # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Loss functions
"""
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
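# Worked example (illustrative): smooth_BCE(eps=0.1) returns (0.95, 0.05), i.e. positive
# targets are softened to 1 - eps/2 and negative targets are lifted to eps/2.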
class BCEBlurWithLogitsLoss(nn.Module):
# BCEwithLogitLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
super().__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
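# Minimal usage sketch (illustrative, not part of the original file): wrapping the
# classification BCE in FocalLoss, as ComputeLoss does when hyp['fl_gamma'] > 0.
# Tensor shapes are arbitrary demo values; relies on the module's torch / nn imports.
if __name__ == "__main__":
    _bce = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([1.0]))
    _criterion = FocalLoss(_bce, gamma=1.5, alpha=0.25)
    _demo_loss = _criterion(torch.randn(4, 80), torch.randint(0, 2, (4, 80)).float())
    print(float(_demo_loss))  # scalar focal-modulated BCE loss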
class ComputeLoss:
sort_obj_iou = False
# Compute losses
def __init__(self, model, autobalance=False):
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
m = de_parallel(model).model[-1] # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
self.na = m.na # number of anchors
self.nc = m.nc # number of classes
self.nl = m.nl # number of layers
self.anchors = m.anchors
self.device = device
def __call__(self, p, targets): # predictions, targets
lcls = torch.zeros(1, device=self.device) # class loss
lbox = torch.zeros(1, device=self.device) # box loss
lobj = torch.zeros(1, device=self.device) # object loss
tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj
n = b.shape[0] # number of targets
if n:
# pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0
pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions
# Regression
pxy = pxy.sigmoid() * 2 - 0.5
pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box | iou = bbox_iou(pbox, tbox[i], ESIoU=True).squeeze() # iou(prediction, target) | 0 | 2023-12-10 14:18:29+00:00 | 4k |
user1342/Tweezer | Tweezer/tweezer.py | [
{
"identifier": "GhidraBridge",
"path": "Tweezer/GhidraBridge/ghidra_bridge.py",
"snippet": "class GhidraBridge():\n def __init__(self):\n pass\n\n def _execute_blocking_command(self, command_as_list):\n if command_as_list != None:\n print(\"Executing command: {}\".format(command_as_list))\n result = subprocess.run(command_as_list, capture_output=False, stdout=subprocess.PIPE)\n return result\n\n def generate_ghidra_decom_script(self, path_to_save_decoms_to, file_to_save_script_to):\n\n script = \"\"\"# SaveFunctions.py\n \n# Import necessary Ghidra modules\nfrom ghidra.program.model.listing import Function\nfrom ghidra.util.task import TaskMonitor\nfrom ghidra.app.decompiler import DecompInterface\nimport os\nimport time\nimport re\n\n# Function to save the decompiled C code of a function to a file\ndef save_function_c_code(function, output_directory):\n function_name = function.getName()\n function_c_code = decompile_function_to_c_code(function)\n \n # Create the output directory if it doesn't exist\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n \n # Save the C code to a file\n current_epoch_time = int(time.time())\n\n # Combine the elements to create the file path\n output_file_path = os.path.join(\n output_directory,\n re.sub(r'[^\\w\\-\\.\\\\/]', '_', \"{}__{}__{}.c\".format(\n function.getProgram().getName(),\n function_name,\n int(time.time())\n ))\n )\n\n with open(output_file_path, 'w') as output_file:\n output_file.write(function_c_code)\n\n# Function to decompile a function to C code\ndef decompile_function_to_c_code(function):\n decompiler = get_decompiler(function.getProgram())\n result = decompiler.decompileFunction(function, 0, TaskMonitor.DUMMY)\n return result.getDecompiledFunction().getC()\n\n# Function to get the decompiler for the current program\ndef get_decompiler(program):\n decompiler_options = program.getOptions(\"Decompiler\")\n decompiler_id = decompiler_options.getString(\"decompiler\", \"ghidra\")\n decompiler = DecompInterface()\n decompiler.openProgram(program)\n return decompiler\n\n# Main function to iterate through all functions and save their C code\ndef save_all_functions_to_files():\n current_program = getCurrentProgram()\n listing = current_program.getListing()\n \n # Specify the output directory\n output_directory = r\"<PATH>\"\n \n # Iterate through all functions\n for function in listing.getFunctions(True):\n function_name = function.getName()\n save_function_c_code(function, output_directory)\n\n# Run the main function\nsave_all_functions_to_files()\n \"\"\".replace(\"<PATH>\", path_to_save_decoms_to)\n\n with open(file_to_save_script_to, \"w\") as file:\n file.write(script)\n\n def _check_if_ghidra_project_exists(self, project_folder, project_name):\n\n project_folder_path = Path(project_folder, project_name + \".gpr\")\n\n return project_folder_path.exists()\n\n def _construct_ghidra_headless_command(self, binary_path, script_path, binary_hash,\n ghidra_project_dir=Path.cwd().name):\n\n binary_name = \"analyzeHeadless.bat\"\n\n # Check if the binary is on the PATH\n headless = shutil.which(binary_name)\n\n temp_script_path = Path(script_path)\n temp_script_dir = temp_script_path.parent\n Path(temp_script_dir).resolve()\n if headless is not None:\n print(f\"{binary_name} found at: {headless}\")\n else:\n # Binary not found, prompt user to provide the path\n user_provided_path = input(f\"{binary_name} not found on the PATH. 
Please provide the full path: \")\n\n # Verify if the provided path is valid\n if shutil.which(user_provided_path) is not None:\n headless = user_provided_path\n print(f\"{binary_name} found at: {headless}\")\n\n headless = user_provided_path\n else:\n raise Exception(f\"Error: {binary_name} not found at the provided path.\")\n\n with tempfile.TemporaryDirectory() as ghidra_project_dir:\n # Construct Ghidra headless command\n commandStr = [\n headless,\n ghidra_project_dir,\n binary_hash,\n \"-import\",\n binary_path,\n \"-scriptPath\",\n temp_script_dir,\n \"-postScript\",\n temp_script_path.name\n ]\n\n # Run Ghidra headless command\n self._execute_blocking_command(commandStr)\n\n def _hash_binary(self, binary_path):\n with open(binary_path, 'rb') as f:\n binary_hash = hashlib.sha256(f.read()).hexdigest()\n return binary_hash\n\n def decompile_binaries_functions(self, path_to_binary, decom_folder):\n binary_hash = self._hash_binary(path_to_binary)\n with tempfile.TemporaryDirectory() as tmpdirname:\n script_path = Path(tmpdirname, \"decom_script.py\").resolve()\n self.generate_ghidra_decom_script(decom_folder, script_path)\n self._construct_ghidra_headless_command(path_to_binary, script_path, binary_hash)\n\n def decompile_all_binaries_in_folder(self, path_to_folder, decom_folder):\n # Create a list to store all the file paths\n files_to_process = [file_path for file_path in Path(path_to_folder).iterdir() if file_path.is_file()]\n\n # Use a ProcessPoolExecutor to execute the decompilation in parallel\n with ProcessPoolExecutor() as executor:\n # Create a list of futures\n futures = [executor.submit(self.decompile_binaries_functions, file_path, decom_folder) for file_path in\n files_to_process]\n\n # Use tqdm to show progress\n for _ in tqdm(concurrent.futures.as_completed(futures), total=len(files_to_process),\n desc=\"Decompiling functions in binaries from {}\".format(path_to_folder)):\n pass"
},
{
"identifier": "Model",
"path": "Tweezer/Model/model.py",
"snippet": "class Model:\n vectors = []\n vectors_path = \"\"\n\n def __init__(self, vectors_path=\"TweezerMDL\", vector_size=100):\n self.vectors_path = vectors_path\n\n if Path(vectors_path).is_file():\n self.vectors = self.read_vector_file(vectors_path)\n\n def learn(self, dict_to_add_to_dataset):\n dict_with_vector = self.process_code_and_append_vector(dict_to_add_to_dataset)\n self.vectors.append(dict_with_vector)\n self.save_vector_file(self.vectors, self.vectors_path)\n\n def save_vector_file(self, vectors, vectors_path):\n with open(vectors_path, 'wb') as file:\n pickle.dump(vectors, file)\n\n def read_vector_file(self, vector_path):\n with open(vector_path, 'rb') as file:\n return pickle.load(file)\n\n def process_code_and_append_vector(self, data):\n \"\"\"\n Process the given code data using Word2Vec to generate a fixed-size vector and append it to the data dict.\n\n :param data: A dictionary in the format {\"name\": <string>, \"code\": <list of strings>}\n :return: The original dictionary with an added key \"vector\" containing the Word2Vec vector of the code.\n \"\"\"\n # Constants\n VECTOR_SIZE = 500\n\n # Extract code lines from the data\n code_lines = data[\"code\"]\n\n # Tokenize the code lines\n tokens = [line.split() for line in code_lines]\n\n # Train a Word2Vec model\n model = Word2Vec(tokens, vector_size=VECTOR_SIZE, window=5, min_count=1, workers=4)\n\n # Generate vectors for each token and aggregate them\n vectors = np.zeros((VECTOR_SIZE,))\n for token_line in tokens:\n for token in token_line:\n if token in model.wv:\n vectors += model.wv[token]\n\n # Average the vectors\n if len(tokens) > 0:\n vectors /= len(tokens)\n\n # Pad or truncate the vector to ensure it's of size VECTOR_SIZE\n if len(vectors) > VECTOR_SIZE:\n vectors = vectors[:VECTOR_SIZE]\n elif len(vectors) < VECTOR_SIZE:\n vectors = np.pad(vectors, (0, VECTOR_SIZE - len(vectors)), 'constant')\n\n # Append the vector to the data dictionary\n data[\"vector\"] = vectors\n\n return data\n\n def find_closest_code(self, dataset, target):\n \"\"\"\n Find the code in the dataset closest to the target based on their vectors and add a 'distance' field to each.\n\n :param dataset: A list of dictionaries, each with a 'vector' key among others.\n :param target: A dictionary with a 'vector' key.\n :return: The dataset with an added 'distance' field in each dictionary indicating closeness to the target.\n \"\"\"\n target_vector = target.get('vector')\n\n for data in dataset:\n data_vector = data.get('vector')\n # Calculate cosine distance between vectors, lower means closer\n data['distance'] = cosine(data_vector, target_vector)\n\n return dataset"
},
{
"identifier": "Trainer",
"path": "Tweezer/Training/trainer.py",
"snippet": "class Trainer():\n\n def __init__(self):\n pass\n\n def _generate_decompiled_functions_from_binaries(self, paths_to_binary_folders, decom_output):\n bridge = GhidraBridge()\n for path in paths_to_binary_folders:\n print(\"Decompiling: {}\".format(path))\n bridge.decompile_all_binaries_in_folder(Path(path).resolve(), decom_output)"
}
] | import argparse
import re
import tempfile
from pathlib import Path
from pprint import pprint
from Tweezer.GhidraBridge.ghidra_bridge import GhidraBridge
from Tweezer.Model.model import Model
from Tweezer.Training.trainer import Trainer | 2,466 |
class Tweezer():
def __init__(self, model_path="TweezerMDL"):
self.model = None
self.model_path = model_path
def train(self, list_of_binary_folders):
self.extend_model_training(list_of_binary_folders)
def extend_model_training(self, list_of_binary_folders):
|
class Tweezer():
def __init__(self, model_path="TweezerMDL"):
self.model = None
self.model_path = model_path
def train(self, list_of_binary_folders):
self.extend_model_training(list_of_binary_folders)
def extend_model_training(self, list_of_binary_folders): | trainer = Trainer() | 2 | 2023-12-10 21:01:03+00:00 | 4k |
felixcheng97/AGAP | lib/dpvgo.py | [
{
"identifier": "Raw2Alpha",
"path": "lib/dvgo.py",
"snippet": "class Raw2Alpha(torch.autograd.Function):\n @staticmethod\n def forward(ctx, density, shift, interval):\n '''\n alpha = 1 - exp(-softplus(density + shift) * interval)\n = 1 - exp(-log(1 + exp(density + shift)) * interval)\n = 1 - exp(log(1 + exp(density + shift)) ^ (-interval))\n = 1 - (1 + exp(density + shift)) ^ (-interval)\n '''\n exp, alpha = render_utils_cuda.raw2alpha(density, shift, interval)\n if density.requires_grad:\n ctx.save_for_backward(exp)\n ctx.interval = interval\n return alpha\n\n @staticmethod\n @torch.autograd.function.once_differentiable\n def backward(ctx, grad_back):\n '''\n alpha' = interval * ((1 + exp(density + shift)) ^ (-interval-1)) * exp(density + shift)'\n = interval * ((1 + exp(density + shift)) ^ (-interval-1)) * exp(density + shift)\n '''\n exp = ctx.saved_tensors[0]\n interval = ctx.interval\n return render_utils_cuda.raw2alpha_backward(exp, grad_back.contiguous(), interval), None, None"
},
{
"identifier": "Alphas2Weights",
"path": "lib/dvgo.py",
"snippet": "class Alphas2Weights(torch.autograd.Function):\n @staticmethod\n def forward(ctx, alpha, ray_id, N):\n weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)\n if alpha.requires_grad:\n ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)\n ctx.n_rays = N\n return weights, alphainv_last\n\n @staticmethod\n @torch.autograd.function.once_differentiable\n def backward(ctx, grad_weights, grad_last):\n alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors\n grad = render_utils_cuda.alpha2weight_backward(\n alpha, weights, T, alphainv_last,\n i_start, i_end, ctx.n_rays, grad_weights, grad_last)\n return grad, None, None"
},
{
"identifier": "create_full_step_id",
"path": "lib/dmpigo.py",
"snippet": "@functools.lru_cache(maxsize=128)\ndef create_full_step_id(shape):\n ray_id = torch.arange(shape[0]).view(-1,1).expand(shape).flatten()\n step_id = torch.arange(shape[1]).view(1,-1).expand(shape).flatten()\n return ray_id, step_id"
}
] | import os
import time
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import segment_coo
from . import grid
from .dvgo import Raw2Alpha, Alphas2Weights
from .dmpigo import create_full_step_id
from .networks import *
from torch.utils.cpp_extension import load | 3,498 | rgb_np = np.uint8(rgb.numpy() * 255)
return rgb_np
def _set_equ_resolution(self, equ_size):
self.equ_size = equ_size
print('dpvgo equ_size ', self.equ_size)
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_len = self.world_size[0].item()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('dpvgo voxel_size ', self.voxel_size)
print('dpvgo world_size ', self.world_size)
print('dpvgo voxel_size_base ', self.voxel_size_base)
print('dpvgo voxel_size_ratio', self.voxel_size_ratio)
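        # Worked example (illustrative): with xyz spanning [-1, 1]^3 and num_voxels = 256 ** 3,
        # voxel_size = (8 / 256 ** 3) ** (1 / 3) = 2 / 256 and world_size = [256, 256, 256].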
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'voxel_size_ratio': self.voxel_size_ratio,
'mask_cache_world_size': list(self.mask_cache.mask.shape),
'fast_color_thres': self.fast_color_thres,
'contracted_norm': self.contracted_norm,
'density_type': self.density_type,
'k0_type': self.k0_type,
'density_config': self.density_config,
'k0_config': self.k0_config,
**self.rgbnet_kwargs,
'equ_size': self.equ_size,
'xyz_config': self.xyz_config,
'viewdirs_config': self.viewdirs_config,
'deformation_config': self.deformation_config,
}
@torch.no_grad()
def scale_equ_grid(self, equ_size, upsample):
print('dpvgo scale_equ_grid start')
ori_equ_size = self.equ_size
self._set_equ_resolution(equ_size)
print('dpvgo scale_equ_grid scale equ_size from', ori_equ_size, 'to', self.equ_size)
self.k0.scale_equ_grid(self.equ_size, upsample)
        print('dpvgo k0 scale_equ_grid finish')
@torch.no_grad()
def scale_volume_grid(self, num_voxels):
print('dpvgo: scale_volume_grid start')
ori_world_size = self.world_size
self._set_grid_resolution(num_voxels)
print('dpvgo: scale_volume_grid scale world_size from', ori_world_size.tolist(), 'to', self.world_size.tolist())
self.density.scale_volume_grid(self.world_size)
if np.prod(self.world_size.tolist()) <= 256**3:
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.world_size[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.world_size[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.world_size[2]),
), -1)
self_alpha = F.max_pool3d(self.activate_density(self.density.get_dense_grid()), kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache = grid.MaskGrid(
path=None, mask=self.mask_cache(self_grid_xyz) & (self_alpha>self.fast_color_thres),
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
print('dpvgo: scale_volume_grid finish')
@torch.no_grad()
def update_occupancy_cache(self):
ori_p = self.mask_cache.mask.float().mean().item()
cache_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.mask_cache.mask.shape[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.mask_cache.mask.shape[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.mask_cache.mask.shape[2]),
), -1)
cache_grid_density = self.density(cache_grid_xyz)[None,None]
cache_grid_alpha = self.activate_density(cache_grid_density)
cache_grid_alpha = F.max_pool3d(cache_grid_alpha, kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache.mask &= (cache_grid_alpha > self.fast_color_thres)
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
def update_occupancy_cache_lt_nviews(self, rays_o_tr, rays_d_tr, imsz, render_kwargs, maskout_lt_nviews):
print('dpvgo update mask_cache lt_nviews start')
eps_time = time.time()
count = torch.zeros_like(self.density.get_dense_grid()).long()
device = count.device
for rays_o_, rays_d_ in zip(rays_o_tr.split(imsz), rays_d_tr.split(imsz)):
ones = grid.DenseGrid(1, self.world_size, self.xyz_min, self.xyz_max)
for rays_o, rays_d in zip(rays_o_.split(8192), rays_d_.split(8192)):
ray_pts, inner_mask, t = self.sample_ray(
ori_rays_o=rays_o.to(device), ori_rays_d=rays_d.to(device),
**render_kwargs)
ones(ray_pts).sum().backward()
count.data += (ones.grid.grad > 1)
ori_p = self.mask_cache.mask.float().mean().item()
self.mask_cache.mask &= (count >= maskout_lt_nviews)[0,0]
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
eps_time = time.time() - eps_time
print(f'dpvgo update mask_cache lt_nviews finish (eps time:', eps_time, 'sec)')
def density_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.density.total_variation_add_grad(w, w, w, dense_mode)
def k0_total_variation_add_grad(self, weight, dense_mode):
wx = weight * self.equ_size[1] / 128
wy = weight * self.equ_size[0] / 128
self.k0.total_variation_2d_add_grad(wx, wy, dense_mode)
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
shape = density.shape
|
parent_dir = os.path.dirname(os.path.abspath(__file__))
ub360_utils_cuda = load(
name='ub360_utils_cuda',
sources=[
os.path.join(parent_dir, path)
for path in ['cuda/ub360_utils.cpp', 'cuda/ub360_utils_kernel.cu']],
verbose=True)
'''Model'''
class DirectPanoramaVoxGO(nn.Module):
def __init__(self, xyz_min, xyz_max,
num_voxels=0, num_voxels_base=0,
alpha_init=None,
mask_cache_world_size=None,
fast_color_thres=0,
contracted_norm='l2',
density_type='DenseGrid', k0_type='DenseGrid',
density_config={}, k0_config={},
rgbnet_dim=0,
rgbnet_depth=3, rgbnet_width=128,
equ_size=(768,1536),
xyz_config={},
viewdirs_config={},
deformation_config={},
**kwargs):
super(DirectPanoramaVoxGO, self).__init__()
self.register_buffer('xyz_min', torch.Tensor([-1,-1,-1]))
self.register_buffer('xyz_max', torch.Tensor([1,1,1]))
if isinstance(fast_color_thres, dict):
self._fast_color_thres = fast_color_thres
self.fast_color_thres = fast_color_thres[0]
else:
self._fast_color_thres = None
self.fast_color_thres = fast_color_thres
self.contracted_norm = contracted_norm
# determine based grid resolution
self.num_voxels_base = num_voxels_base
self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
# determine init grid resolution
self._set_grid_resolution(num_voxels)
self._set_equ_resolution(equ_size)
# determine the density bias shift
self.alpha_init = alpha_init
self.register_buffer('act_shift', torch.FloatTensor([np.log(1/(1-alpha_init) - 1)]))
print('dpvgo: set density bias shift to', self.act_shift)
# init density voxel grid
self.density_type = density_type
self.density_config = density_config
self.density = grid.create_grid(
density_type, channels=1, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max,
config=self.density_config)
# init color representation
self.rgbnet_kwargs = {
'rgbnet_dim': rgbnet_dim,
'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
}
self.k0_type = k0_type
self.k0_config = k0_config
if rgbnet_dim == 0:
self.k0_explicit_grid = grid.DenseEquExplicitGrid(channels=3, equ_size=self.equ_size)
self.k0_explicit_mlp = None
else:
self.k0_explicit_grid = grid.DenseEquExplicitGrid(channels=rgbnet_dim, equ_size=self.equ_size)
self.k0_explicit_mlp = nn.Sequential(
nn.Linear(rgbnet_dim, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.k0_explicit_mlp[-1].bias, 0)
self.k0_explicit = grid.DenseEquExplicit(explicit_grid=self.k0_explicit_grid, explicit_mlp=self.k0_explicit_mlp, sigmoid=self.k0_type=='DenseEquExplicit')
self.k0 = self.k0_explicit
self.xyz_config = xyz_config
self.viewdirs_config = viewdirs_config
self.deformation_config = deformation_config
self.xyz_enc_type = xyz_config['enc_type']
if self.xyz_enc_type == 'pe':
self.embedding_xyz = PositionalEncoding(in_channels=3, **xyz_config[self.xyz_enc_type])
elif self.xyz_enc_type == 'hash':
self.embedding_xyz = HashEncoding(**xyz_config[self.xyz_enc_type])
else:
raise NotImplementedError
self.viewdirs_enc_type = viewdirs_config['enc_type']
if self.viewdirs_enc_type == 'pe':
self.embedding_viewdirs = ViewdirEncoding(in_channels=3, **viewdirs_config[self.viewdirs_enc_type])
elif self.viewdirs_enc_type == 'hash':
self.embedding_viewdirs = HashEncoding(**viewdirs_config[self.viewdirs_enc_type])
else:
raise NotImplementedError
self.deform_type = deformation_config['deform_type']
in_channels = self.embedding_xyz.out_channels + self.embedding_viewdirs.out_channels
if self.deform_type == 'mlp':
self.deformation_field = DeformationMLP(in_channels=in_channels, **deformation_config[self.deform_type])
else:
self.deformation_field = DeformationTCNN(in_channels=in_channels, **deformation_config[self.deform_type])
        print('dpvgo: density grid', self.density)
print('dpvgo: k0', self.k0)
print('dpvgo: deformation field', self.deformation_field)
print('dpvgo: embedding_xyz', self.embedding_xyz)
print('dpvgo: embedding_viewdirs', self.embedding_viewdirs)
# Using the coarse geometry if provided (used to determine known free space and unknown space)
# Re-implement as occupancy grid (2021/1/31)
if mask_cache_world_size is None:
mask_cache_world_size = self.world_size
mask = torch.ones(list(mask_cache_world_size), dtype=torch.bool)
self.mask_cache = grid.MaskGrid(
path=None, mask=mask,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
def get_k0_grid_rgb(self):
rgb = self.k0.get_current_equ()[0].permute(1,2,0).detach().cpu()
rgb_np = np.uint8(rgb.numpy() * 255)
return rgb_np
def _set_equ_resolution(self, equ_size):
self.equ_size = equ_size
print('dpvgo equ_size ', self.equ_size)
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_len = self.world_size[0].item()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('dpvgo voxel_size ', self.voxel_size)
print('dpvgo world_size ', self.world_size)
print('dpvgo voxel_size_base ', self.voxel_size_base)
print('dpvgo voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'voxel_size_ratio': self.voxel_size_ratio,
'mask_cache_world_size': list(self.mask_cache.mask.shape),
'fast_color_thres': self.fast_color_thres,
'contracted_norm': self.contracted_norm,
'density_type': self.density_type,
'k0_type': self.k0_type,
'density_config': self.density_config,
'k0_config': self.k0_config,
**self.rgbnet_kwargs,
'equ_size': self.equ_size,
'xyz_config': self.xyz_config,
'viewdirs_config': self.viewdirs_config,
'deformation_config': self.deformation_config,
}
@torch.no_grad()
def scale_equ_grid(self, equ_size, upsample):
print('dpvgo scale_equ_grid start')
ori_equ_size = self.equ_size
self._set_equ_resolution(equ_size)
print('dpvgo scale_equ_grid scale equ_size from', ori_equ_size, 'to', self.equ_size)
self.k0.scale_equ_grid(self.equ_size, upsample)
        print('dpvgo k0 scale_equ_grid finish')
@torch.no_grad()
def scale_volume_grid(self, num_voxels):
print('dpvgo: scale_volume_grid start')
ori_world_size = self.world_size
self._set_grid_resolution(num_voxels)
print('dpvgo: scale_volume_grid scale world_size from', ori_world_size.tolist(), 'to', self.world_size.tolist())
self.density.scale_volume_grid(self.world_size)
if np.prod(self.world_size.tolist()) <= 256**3:
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.world_size[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.world_size[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.world_size[2]),
), -1)
self_alpha = F.max_pool3d(self.activate_density(self.density.get_dense_grid()), kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache = grid.MaskGrid(
path=None, mask=self.mask_cache(self_grid_xyz) & (self_alpha>self.fast_color_thres),
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
print('dpvgo: scale_volume_grid finish')
@torch.no_grad()
def update_occupancy_cache(self):
ori_p = self.mask_cache.mask.float().mean().item()
cache_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.mask_cache.mask.shape[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.mask_cache.mask.shape[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.mask_cache.mask.shape[2]),
), -1)
cache_grid_density = self.density(cache_grid_xyz)[None,None]
cache_grid_alpha = self.activate_density(cache_grid_density)
cache_grid_alpha = F.max_pool3d(cache_grid_alpha, kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache.mask &= (cache_grid_alpha > self.fast_color_thres)
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
def update_occupancy_cache_lt_nviews(self, rays_o_tr, rays_d_tr, imsz, render_kwargs, maskout_lt_nviews):
print('dpvgo update mask_cache lt_nviews start')
eps_time = time.time()
count = torch.zeros_like(self.density.get_dense_grid()).long()
device = count.device
for rays_o_, rays_d_ in zip(rays_o_tr.split(imsz), rays_d_tr.split(imsz)):
ones = grid.DenseGrid(1, self.world_size, self.xyz_min, self.xyz_max)
for rays_o, rays_d in zip(rays_o_.split(8192), rays_d_.split(8192)):
ray_pts, inner_mask, t = self.sample_ray(
ori_rays_o=rays_o.to(device), ori_rays_d=rays_d.to(device),
**render_kwargs)
ones(ray_pts).sum().backward()
count.data += (ones.grid.grad > 1)
ori_p = self.mask_cache.mask.float().mean().item()
self.mask_cache.mask &= (count >= maskout_lt_nviews)[0,0]
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
eps_time = time.time() - eps_time
print(f'dpvgo update mask_cache lt_nviews finish (eps time: {eps_time} sec)')
def density_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.density.total_variation_add_grad(w, w, w, dense_mode)
def k0_total_variation_add_grad(self, weight, dense_mode):
wx = weight * self.equ_size[1] / 128
wy = weight * self.equ_size[0] / 128
self.k0.total_variation_2d_add_grad(wx, wy, dense_mode)
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
shape = density.shape | return Raw2Alpha.apply(density.flatten(), self.act_shift, interval).reshape(shape) | 0 | 2023-12-11 05:49:46+00:00 | 4k |
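The completion target for this example is Raw2Alpha.apply(density.flatten(), self.act_shift, interval), which in DVGO-style voxel grids turns raw grid density into a per-sample alpha. A minimal PyTorch sketch of that conversion, assuming the commonly used formula alpha = 1 - exp(-softplus(density + act_shift) * interval) rather than the repository's fused CUDA kernel:

import torch
import torch.nn.functional as F

def raw_to_alpha(density: torch.Tensor, act_shift: float, interval: float) -> torch.Tensor:
    # alpha = 1 - exp(-softplus(density + act_shift) * interval)
    return 1.0 - torch.exp(-F.softplus(density + act_shift) * interval)

alpha = raw_to_alpha(torch.randn(1024), act_shift=0.0, interval=1.0)  # values in [0, 1)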
Vill-Lab/2024-AAAI-HPT | datasets/eurosat.py | [
{
"identifier": "OxfordPets",
"path": "datasets/oxford_pets.py",
"snippet": "class OxfordPets(DatasetBase):\n\n dataset_dir = \"oxford_pets\"\n\n def __init__(self, cfg):\n root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))\n self.dataset_dir = os.path.join(root, self.dataset_dir)\n self.image_dir = os.path.join(self.dataset_dir, \"images\")\n self.anno_dir = os.path.join(self.dataset_dir, \"annotations\")\n self.split_path = os.path.join(self.dataset_dir, \"split_zhou_OxfordPets.json\")\n self.split_fewshot_dir = os.path.join(self.dataset_dir, \"split_fewshot\")\n mkdir_if_missing(self.split_fewshot_dir)\n\n if os.path.exists(self.split_path):\n train, val, test = self.read_split(self.split_path, self.image_dir)\n else:\n trainval = self.read_data(split_file=\"trainval.txt\")\n test = self.read_data(split_file=\"test.txt\")\n train, val = self.split_trainval(trainval)\n self.save_split(train, val, test, self.split_path, self.image_dir)\n\n num_shots = cfg.DATASET.NUM_SHOTS\n if num_shots >= 1:\n seed = cfg.SEED\n preprocessed = os.path.join(self.split_fewshot_dir, f\"shot_{num_shots}-seed_{seed}.pkl\")\n \n if os.path.exists(preprocessed):\n print(f\"Loading preprocessed few-shot data from {preprocessed}\")\n with open(preprocessed, \"rb\") as file:\n data = pickle.load(file)\n train, val = data[\"train\"], data[\"val\"]\n else:\n train = self.generate_fewshot_dataset(train, num_shots=num_shots)\n val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))\n data = {\"train\": train, \"val\": val}\n print(f\"Saving preprocessed few-shot data to {preprocessed}\")\n with open(preprocessed, \"wb\") as file:\n pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)\n\n subsample = cfg.DATASET.SUBSAMPLE_CLASSES\n train, val, test = self.subsample_classes(train, val, test, subsample=subsample)\n\n super().__init__(train_x=train, val=val, test=test)\n\n def read_data(self, split_file):\n filepath = os.path.join(self.anno_dir, split_file)\n items = []\n\n with open(filepath, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n imname, label, species, _ = line.split(\" \")\n breed = imname.split(\"_\")[:-1]\n breed = \"_\".join(breed)\n breed = breed.lower()\n imname += \".jpg\"\n impath = os.path.join(self.image_dir, imname)\n label = int(label) - 1 # convert to 0-based index\n item = Datum(impath=impath, label=label, classname=breed)\n items.append(item)\n\n return items\n\n @staticmethod\n def split_trainval(trainval, p_val=0.2):\n p_trn = 1 - p_val\n print(f\"Splitting trainval into {p_trn:.0%} train and {p_val:.0%} val\")\n tracker = defaultdict(list)\n for idx, item in enumerate(trainval):\n label = item.label\n tracker[label].append(idx)\n\n train, val = [], []\n for label, idxs in tracker.items():\n n_val = round(len(idxs) * p_val)\n assert n_val > 0\n random.shuffle(idxs)\n for n, idx in enumerate(idxs):\n item = trainval[idx]\n if n < n_val:\n val.append(item)\n else:\n train.append(item)\n\n return train, val\n\n @staticmethod\n def save_split(train, val, test, filepath, path_prefix):\n def _extract(items):\n out = []\n for item in items:\n impath = item.impath\n label = item.label\n classname = item.classname\n impath = impath.replace(path_prefix, \"\")\n if impath.startswith(\"/\"):\n impath = impath[1:]\n out.append((impath, label, classname))\n return out\n\n train = _extract(train)\n val = _extract(val)\n test = _extract(test)\n\n split = {\"train\": train, \"val\": val, \"test\": test}\n\n write_json(split, filepath)\n print(f\"Saved split to {filepath}\")\n\n @staticmethod\n def 
read_split(filepath, path_prefix):\n def _convert(items):\n out = []\n for impath, label, classname in items:\n impath = os.path.join(path_prefix, impath)\n item = Datum(impath=impath, label=int(label), classname=classname)\n out.append(item)\n return out\n\n print(f\"Reading split from {filepath}\")\n split = read_json(filepath)\n train = _convert(split[\"train\"])\n val = _convert(split[\"val\"])\n test = _convert(split[\"test\"])\n\n return train, val, test\n \n @staticmethod\n def subsample_classes(*args, subsample=\"all\"):\n \"\"\"Divide classes into two groups. The first group\n represents base classes while the second group represents\n new classes.\n\n Args:\n args: a list of datasets, e.g. train, val and test.\n subsample (str): what classes to subsample.\n \"\"\"\n assert subsample in [\"all\", \"base\", \"new\"]\n\n if subsample == \"all\":\n return args\n \n dataset = args[0]\n labels = set()\n for item in dataset:\n labels.add(item.label)\n labels = list(labels)\n labels.sort()\n n = len(labels)\n # Divide classes into two halves\n m = math.ceil(n / 2)\n\n print(f\"SUBSAMPLE {subsample.upper()} CLASSES!\")\n if subsample == \"base\":\n selected = labels[:m] # take the first half\n else:\n selected = labels[m:] # take the second half\n relabeler = {y: y_new for y_new, y in enumerate(selected)}\n \n output = []\n for dataset in args:\n dataset_new = []\n for item in dataset:\n if item.label not in selected:\n continue\n item_new = Datum(\n impath=item.impath,\n label=relabeler[item.label],\n classname=item.classname\n )\n dataset_new.append(item_new)\n output.append(dataset_new)\n \n return output"
},
{
"identifier": "DescribableTextures",
"path": "datasets/dtd.py",
"snippet": "class DescribableTextures(DatasetBase):\n\n dataset_dir = \"dtd\"\n\n def __init__(self, cfg):\n root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))\n self.dataset_dir = os.path.join(root, self.dataset_dir)\n self.image_dir = os.path.join(self.dataset_dir, \"images\")\n self.split_path = os.path.join(self.dataset_dir, \"split_zhou_DescribableTextures.json\")\n self.split_fewshot_dir = os.path.join(self.dataset_dir, \"split_fewshot\")\n mkdir_if_missing(self.split_fewshot_dir)\n\n if os.path.exists(self.split_path):\n train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)\n else:\n train, val, test = self.read_and_split_data(self.image_dir)\n OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)\n\n num_shots = cfg.DATASET.NUM_SHOTS\n if num_shots >= 1:\n seed = cfg.SEED\n preprocessed = os.path.join(self.split_fewshot_dir, f\"shot_{num_shots}-seed_{seed}.pkl\")\n \n if os.path.exists(preprocessed):\n print(f\"Loading preprocessed few-shot data from {preprocessed}\")\n with open(preprocessed, \"rb\") as file:\n data = pickle.load(file)\n train, val = data[\"train\"], data[\"val\"]\n else:\n train = self.generate_fewshot_dataset(train, num_shots=num_shots)\n val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))\n data = {\"train\": train, \"val\": val}\n print(f\"Saving preprocessed few-shot data to {preprocessed}\")\n with open(preprocessed, \"wb\") as file:\n pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)\n\n subsample = cfg.DATASET.SUBSAMPLE_CLASSES\n train, val, test = OxfordPets.subsample_classes(train, val, test, subsample=subsample)\n\n super().__init__(train_x=train, val=val, test=test)\n\n @staticmethod\n def read_and_split_data(image_dir, p_trn=0.5, p_val=0.2, ignored=[], new_cnames=None):\n # The data are supposed to be organized into the following structure\n # =============\n # images/\n # dog/\n # cat/\n # horse/\n # =============\n categories = listdir_nohidden(image_dir)\n categories = [c for c in categories if c not in ignored]\n categories.sort()\n\n p_tst = 1 - p_trn - p_val\n print(f\"Splitting into {p_trn:.0%} train, {p_val:.0%} val, and {p_tst:.0%} test\")\n\n def _collate(ims, y, c):\n items = []\n for im in ims:\n item = Datum(impath=im, label=y, classname=c) # is already 0-based\n items.append(item)\n return items\n\n train, val, test = [], [], []\n for label, category in enumerate(categories):\n category_dir = os.path.join(image_dir, category)\n images = listdir_nohidden(category_dir)\n images = [os.path.join(category_dir, im) for im in images]\n random.shuffle(images)\n n_total = len(images)\n n_train = round(n_total * p_trn)\n n_val = round(n_total * p_val)\n n_test = n_total - n_train - n_val\n assert n_train > 0 and n_val > 0 and n_test > 0\n\n if new_cnames is not None and category in new_cnames:\n category = new_cnames[category]\n\n train.extend(_collate(images[:n_train], label, category))\n val.extend(_collate(images[n_train : n_train + n_val], label, category))\n test.extend(_collate(images[n_train + n_val :], label, category))\n\n return train, val, test"
}
] | import os
import pickle
from dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase
from dassl.utils import mkdir_if_missing
from .oxford_pets import OxfordPets
from .dtd import DescribableTextures as DTD | 2,940 |
NEW_CLASSNAMES = {
"AnnualCrop": "Annual Crop Land",
"Forest": "Forest",
"HerbaceousVegetation": "Herbaceous Vegetation Land",
"Highway": "Highway or Road",
"Industrial": "Industrial Buildings",
"Pasture": "Pasture Land",
"PermanentCrop": "Permanent Crop Land",
"Residential": "Residential Buildings",
"River": "River",
"SeaLake": "Sea or Lake",
}
@DATASET_REGISTRY.register()
class EuroSAT(DatasetBase):
dataset_dir = "eurosat"
def __init__(self, cfg):
root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = os.path.join(root, self.dataset_dir)
self.image_dir = os.path.join(self.dataset_dir, "2750")
self.split_path = os.path.join(self.dataset_dir, "split_zhou_EuroSAT.json")
self.split_fewshot_dir = os.path.join(self.dataset_dir, "split_fewshot")
mkdir_if_missing(self.split_fewshot_dir)
if os.path.exists(self.split_path):
train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)
else:
|
NEW_CLASSNAMES = {
"AnnualCrop": "Annual Crop Land",
"Forest": "Forest",
"HerbaceousVegetation": "Herbaceous Vegetation Land",
"Highway": "Highway or Road",
"Industrial": "Industrial Buildings",
"Pasture": "Pasture Land",
"PermanentCrop": "Permanent Crop Land",
"Residential": "Residential Buildings",
"River": "River",
"SeaLake": "Sea or Lake",
}
@DATASET_REGISTRY.register()
class EuroSAT(DatasetBase):
dataset_dir = "eurosat"
def __init__(self, cfg):
root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = os.path.join(root, self.dataset_dir)
self.image_dir = os.path.join(self.dataset_dir, "2750")
self.split_path = os.path.join(self.dataset_dir, "split_zhou_EuroSAT.json")
self.split_fewshot_dir = os.path.join(self.dataset_dir, "split_fewshot")
mkdir_if_missing(self.split_fewshot_dir)
if os.path.exists(self.split_path):
train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)
else: | train, val, test = DTD.read_and_split_data(self.image_dir, new_cnames=NEW_CLASSNAMES) | 0 | 2023-12-11 03:01:58+00:00 | 4k |
WalBouss/GEM | gem/gem_wrapper.py | [
{
"identifier": "SelfSelfAttention",
"path": "gem/gem_utils.py",
"snippet": "class SelfSelfAttention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., ss_attn_iter=1,\n ss_attn_temp=None):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n self.ss_attn_iter = ss_attn_iter\n self.ss_attn_temp = ss_attn_temp\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, attn_bias=None, prev_attn=None):\n x = x.transpose(0, 1)\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n self.v_values = v\n # original self-attention for the original path\n attn_ori_return = (q @ k.transpose(-2, -1)) * self.scale\n attn_ori = attn_ori_return.softmax(dim=-1)\n attn_ori = self.attn_drop(attn_ori)\n\n x_ori = (attn_ori @ v).transpose(1, 2).reshape(B, N, C)\n x_ori = self.proj_drop(self.proj(x_ori))\n\n # GEM\n xs1 = v\n xs2 = k\n xs3 = q\n\n if self.ss_attn_temp is None:\n pre_norm = torch.norm(x, dim=-1).mean(dim=-1, keepdim=True).unsqueeze(1).unsqueeze(-1)\n inv_temp = pre_norm * self.scale\n else:\n inv_temp = self.ss_attn_temp\n\n for it in range(self.ss_attn_iter):\n xs1 = F.normalize(xs1, dim=-1)\n xs2 = F.normalize(xs2, dim=-1)\n xs3 = F.normalize(xs3, dim=-1)\n\n attn_return1 = (xs1 @ xs1.transpose(-2, -1)) * inv_temp\n attn_return2 = (xs2 @ xs2.transpose(-2, -1)) * inv_temp\n attn_return3 = (xs3 @ xs3.transpose(-2, -1)) * inv_temp\n\n attn1 = (attn_return1).softmax(dim=-1)\n attn2 = (attn_return2).softmax(dim=-1)\n attn3 = (attn_return3).softmax(dim=-1)\n\n xs1 = attn1 @ xs1\n xs2 = attn2 @ xs2\n xs3 = attn3 @ xs3\n\n # Assigment to V\n xs1 = F.normalize(xs1, dim=-1)\n xs2 = F.normalize(xs2, dim=-1)\n xs3 = F.normalize(xs3, dim=-1)\n\n attn_return1 = (xs1 @ xs1.transpose(-2, -1)) * inv_temp\n attn_return2 = (xs2 @ xs2.transpose(-2, -1)) * inv_temp\n attn_return3 = (xs3 @ xs3.transpose(-2, -1)) * inv_temp\n\n attn1 = (attn_return1).softmax(dim=-1)\n attn2 = (attn_return2).softmax(dim=-1)\n attn3 = (attn_return3).softmax(dim=-1)\n\n xs1 = attn1 @ v\n xs2 = attn2 @ v\n xs3 = attn3 @ v\n xs = (xs1 + xs2 + xs3) / 3\n\n x = xs.transpose(1, 2).reshape(B, N, C)\n x = self.proj_drop(self.proj(x))\n\n return [x.transpose(0, 1), x_ori.transpose(0, 1)]"
},
{
"identifier": "GEMResidualBlock",
"path": "gem/gem_utils.py",
"snippet": "class GEMResidualBlock(nn.Module):\n def __init__(self, res_block):\n super(GEMResidualBlock, self).__init__()\n self.res_block = res_block\n\n def forward(self,\n q_x: torch.Tensor,\n k_x: Optional[torch.Tensor] = None,\n v_x: Optional[torch.Tensor] = None,\n attn_mask: Optional[torch.Tensor] = None,\n ):\n if isinstance(q_x, list):\n x_gem, q_x = q_x\n else:\n x_gem = q_x\n\n x_gem_res, x_ori_res = self.res_block.attn(x=self.res_block.ln_1(q_x))\n x_gem_res, x_ori_res = self.res_block.ls_1(x_gem_res), self.res_block.ls_1(x_ori_res)\n # Original\n x_ori = q_x + x_ori_res\n x_ori = x_ori + self.res_block.ls_2(self.res_block.mlp(self.res_block.ln_2(x_ori)))\n # GEM\n x_gem = x_gem + x_gem_res\n return [x_gem, x_ori]"
},
{
"identifier": "modified_vit_forward",
"path": "gem/gem_utils.py",
"snippet": "def modified_vit_forward(self, x: torch.Tensor):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n grid_h, grid_w = x.shape[2:]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat([_expand_token(self.class_embedding, x.shape[0]).to(x.dtype), x], dim=1)\n # shape = [*, grid ** 2 + 1, width]\n\n if x.shape[1] != self.positional_embedding.shape[1]:\n pos_emb = resample_abs_pos_embed(self.positional_embedding.unsqueeze(0),\n new_size=[grid_h, grid_w],\n # old_size=list(self.grid_size),\n num_prefix_tokens=1,\n interpolation='bicubic',\n antialias=True)\n\n else:\n pos_emb = self.positional_embedding\n\n x = x + pos_emb.to(x.dtype)\n # x = x + self.positional_embedding.to(x.dtype)\n\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x_gem, x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n x_gem = x_gem.permute(1, 0, 2) # LND -> NLD\n\n # Apply proj\n x = self.ln_post(x)\n x_gem = self.ln_post(x_gem)\n if self.proj is not None:\n x = x @ self.proj\n x_gem = x_gem @ self.proj\n\n return [x_gem, x]"
}
] | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from open_clip.transformer import VisionTransformer
from .gem_utils import SelfSelfAttention, GEMResidualBlock, modified_vit_forward | 2,201 |
class GEMWrapper(nn.Module):
def __init__(self, model, tokenizer, depth=7, ss_attn_iter=1, ss_attn_temp=None):
super(GEMWrapper, self).__init__()
self.model = model
self.tokenizer = tokenizer
self.depth = depth
self.ss_attn_iter = ss_attn_iter
self.ss_attn_temp = ss_attn_temp
self.patch_size = self.model.visual.patch_size[0]
self.apply_gem()
def apply_gem(self):
for i in range(1, self.depth):
# Extract info from the original ViT
num_heads = self.model.visual.transformer.resblocks[-i].attn.num_heads
dim = int(self.model.visual.transformer.resblocks[-i].attn.head_dim * num_heads)
qkv_bias = True
# Init the self-self attention layer
ss_attn = SelfSelfAttention(dim=dim, num_heads=num_heads, qkv_bias=qkv_bias,
ss_attn_iter=self.ss_attn_iter, ss_attn_temp=self.ss_attn_temp)
# Copy necessary weights
ss_attn.qkv.weight.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_weight.clone()
ss_attn.qkv.bias.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_bias.clone()
ss_attn.proj.weight.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.weight.clone()
ss_attn.proj.bias.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.bias.clone()
# Swap the original Attention with our SelfSelfAttention
self.model.visual.transformer.resblocks[-i].attn = ss_attn
# Wrap Residual block to handle SelfSelfAttention outputs
|
class GEMWrapper(nn.Module):
def __init__(self, model, tokenizer, depth=7, ss_attn_iter=1, ss_attn_temp=None):
super(GEMWrapper, self).__init__()
self.model = model
self.tokenizer = tokenizer
self.depth = depth
self.ss_attn_iter = ss_attn_iter
self.ss_attn_temp = ss_attn_temp
self.patch_size = self.model.visual.patch_size[0]
self.apply_gem()
def apply_gem(self):
for i in range(1, self.depth):
# Extract info from the original ViT
num_heads = self.model.visual.transformer.resblocks[-i].attn.num_heads
dim = int(self.model.visual.transformer.resblocks[-i].attn.head_dim * num_heads)
qkv_bias = True
# Init the self-self attention layer
ss_attn = SelfSelfAttention(dim=dim, num_heads=num_heads, qkv_bias=qkv_bias,
ss_attn_iter=self.ss_attn_iter, ss_attn_temp=self.ss_attn_temp)
# Copy necessary weights
ss_attn.qkv.weight.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_weight.clone()
ss_attn.qkv.bias.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_bias.clone()
ss_attn.proj.weight.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.weight.clone()
ss_attn.proj.bias.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.bias.clone()
# Swap the original Attention with our SelfSelfAttention
self.model.visual.transformer.resblocks[-i].attn = ss_attn
# Wrap Residual block to handle SelfSelfAttention outputs | self.model.visual.transformer.resblocks[-i] = GEMResidualBlock(self.model.visual.transformer.resblocks[-i]) | 1 | 2023-12-05 08:23:35+00:00 | 4k |
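apply_gem above rebuilds each attention block by copying nn.MultiheadAttention's stacked in_proj weights into a fused qkv linear before swapping in SelfSelfAttention. A small standalone sketch of that weight-transfer pattern, with hypothetical dimensions and plain PyTorch modules:

import torch
import torch.nn as nn

dim, num_heads = 64, 4
mha = nn.MultiheadAttention(dim, num_heads)   # stand-in for the original ViT attention
qkv = nn.Linear(dim, dim * 3, bias=True)      # fused q/k/v projection of the replacement
proj = nn.Linear(dim, dim)

# nn.MultiheadAttention stores q, k and v stacked in in_proj_weight (3*dim, dim),
# so the tensors can be copied into the fused linear directly.
with torch.no_grad():
    qkv.weight.copy_(mha.in_proj_weight)
    qkv.bias.copy_(mha.in_proj_bias)
    proj.weight.copy_(mha.out_proj.weight)
    proj.bias.copy_(mha.out_proj.bias)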
JeffersonQin/DungeonAssistant | registration.py | [
{
"identifier": "o3dobj",
"path": "utils/o3dobj.py",
"snippet": "def get_o3d_unit_block_at_origin():\ndef get_o3d_trajectory_object(points, color=(1, 0, 0)):\n def transform_o3d_format(points):"
},
{
"identifier": "io",
"path": "utils/io.py",
"snippet": "def load_point_clouds(\n pointcloud_base, pointcloud_prefix, merge_cnt, overlap_discard_num, voxel_size=0.0\n):\ndef load_coordinates_and_timestamps(json_file):\ndef load_transformation_matrices(transformation_dir: str):\ndef save_coodinates_and_timestamps(json_file, points, timestamps):"
},
{
"identifier": "tfm",
"path": "utils/tfm.py",
"snippet": "def transform_trajectory(points, transformation):\ndef transform_clouds_and_trajectories(clouds, trajectories, matrices):\ndef retrieve_floor_plan(cloud, scale=100):"
}
] | import json
import argparse
import os
import os.path as osp
import time
import open3d as o3d
import numpy as np
import copy
import matplotlib.pyplot as plt
from utils import o3dobj
from utils import io
from utils import tfm | 2,165 | target,
max_correspondence_distance,
estimation_method=estimation,
criteria=criteria,
)
return result
if __name__ == "__main__":
voxel_size_fgr = args.voxel_size_fgr
voxel_size_icp = args.voxel_size_icp
(
cloud_1,
cloud_2,
cloud_1_down,
cloud_2_down,
cloud_1_fpfh,
cloud_2_fpfh,
) = prepare_dataset(voxel_size=voxel_size_fgr)
color_1 = [0.9450980392, 0.5764705882, 0.7098039216]
color_2 = [0.11, 0.72, 0.89]
cloud_1.paint_uniform_color(color_1)
cloud_2.paint_uniform_color(color_2)
cloud_1_down.paint_uniform_color(color_1)
cloud_2_down.paint_uniform_color(color_2)
# axis
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
# unit block
unit_block = o3dobj.get_o3d_unit_block_at_origin()
# Visualize point cloud
print("Initial preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1_down, cloud_2_down, axis, unit_block])
# FGR
transformation_fast = None
if args.fast_cache != "":
if osp.exists(args.fast_cache):
print("Loading fast global registration cache from: ", args.fast_cache)
transformation_fast = np.load(args.fast_cache, allow_pickle=True)
if transformation_fast is None:
print(
"Fast global registration cache not found. Running fast global registration..."
)
start = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_fast = execute_fast_global_registration(
cloud_1_down, cloud_2_down, cloud_1_fpfh, cloud_2_fpfh, voxel_size_fgr
)
print(f"Fast global registration took {(time.time() - start):.3f} sec.\n")
print(result_fast)
transformation_fast = result_fast.transformation
np.save("registration_fgr.npy", transformation_fast)
cloud_1.transform(transformation_fast)
# Visualize point cloud
print("FGR preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1, cloud_2, axis, unit_block])
# Vanilla ICP
if not args.skip_icp:
(
_,
_,
cloud_1_down,
cloud_2_down,
_,
_,
) = prepare_dataset(voxel_size=voxel_size_icp)
cloud_1_down.transform(transformation_fast)
transformation_icp = None
if args.icp_cache != "":
if osp.exists(args.icp_cache):
print("Loading icp cache from: ", args.icp_cache)
transformation_icp = np.load(args.icp_cache, allow_pickle=True)
if transformation_icp is None:
print("ICP cache not found. Running ICP...")
s = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_icp = execute_vanilla_icp(cloud_1_down, cloud_2_down)
icp_time = time.time() - s
print("Time taken by ICP: ", icp_time)
print("Inlier Fitness: ", result_icp.fitness)
print("Inlier RMSE: ", result_icp.inlier_rmse)
transformation_icp = result_icp.transformation
np.save("registration_icp.npy", transformation_icp)
cloud_1.transform(transformation_icp)
else:
transformation_icp = np.identity(4)
if trajectory_file_path_1 != "":
# trajectory
points_1, timestamps_1 = io.load_coordinates_and_timestamps(
trajectory_file_path_1
)
# transformation
|
parser = argparse.ArgumentParser()
parser.add_argument(
"--pointcloud1",
type=str,
default="pointcloud1.ply",
help="first point cloud file path (1 --[transform]-> 2)",
)
parser.add_argument(
"--pointcloud2",
type=str,
default="pointcloud2.ply",
help="second point cloud file path (1 --[transform]-> 2)",
)
parser.add_argument(
"--trajectory1",
type=str,
default="trajectory1.json",
help="first trajectory file path",
)
parser.add_argument(
"--trajectory2",
type=str,
default="trajectory2.json",
help="second trajectory file path",
)
parser.add_argument(
"--fast_cache",
type=str,
default="",
help="transformation cache of fast global registration if available. default is none",
)
parser.add_argument(
"--icp_cache",
type=str,
default="",
help="transformation cache of icp if available. default is none",
)
parser.add_argument(
"--voxel_size_fgr",
type=float,
default=0.05,
help="voxel size for global fast registration downsampling. default is 0.05",
)
parser.add_argument(
"--voxel_size_icp",
type=float,
default=0.05,
help="voxel size for icp downsampling. default is 0.05",
)
parser.add_argument("--skip_icp", action="store_true", help="skip icp and only run fgr")
parser.add_argument(
"--transformed_trajectory_out",
type=str,
default="trajectory_1.jsonl",
help="output trajectory of the transformed trajectory 1 (to trajectory 2)",
)
args = parser.parse_args()
pointcloud_file_path_1 = args.pointcloud1
pointcloud_file_path_2 = args.pointcloud2
trajectory_file_path_1 = args.trajectory1
trajectory_file_path_2 = args.trajectory2
def preprocess_point_cloud(pcd, voxel_size):
"""Downsamples the point cloud and computes the normals and FPFH features"""
print(f":: Downsample with a voxel size {voxel_size:.3f}.")
pcd_down = pcd.voxel_down_sample(voxel_size)
radius_normal = voxel_size * 2
print(f":: Estimate normal with search radius {radius_normal:.3f}.")
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)
)
radius_feature = voxel_size * 5
print(f":: Compute FPFH feature with search radius {radius_feature:.3f}.")
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100),
)
return pcd_down, pcd_fpfh
def prepare_dataset(voxel_size):
"""Loads two point clouds and downsamples them."""
print(":: Load two point clouds")
source = o3d.io.read_point_cloud(pointcloud_file_path_1)
target = o3d.io.read_point_cloud(pointcloud_file_path_2)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_fast_global_registration(
source_down, target_down, source_fpfh, target_fpfh, voxel_size
):
"""Performs fast global registration on the downsampled point clouds"""
distance_threshold = voxel_size * 0.5
print(
f":: Apply fast global registration with distance threshold {distance_threshold:.3f}"
)
result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(
source_down,
target_down,
source_fpfh,
target_fpfh,
o3d.pipelines.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold
),
)
return result
def execute_vanilla_icp(source, target):
"""Performs vanilla ICP on the point clouds"""
estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()
max_correspondence_distance = 0.5
# Convergence-Criteria for Vanilla ICP
criteria = o3d.pipelines.registration.ICPConvergenceCriteria(
relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50
)
result = o3d.pipelines.registration.registration_icp(
source,
target,
max_correspondence_distance,
estimation_method=estimation,
criteria=criteria,
)
return result
if __name__ == "__main__":
voxel_size_fgr = args.voxel_size_fgr
voxel_size_icp = args.voxel_size_icp
(
cloud_1,
cloud_2,
cloud_1_down,
cloud_2_down,
cloud_1_fpfh,
cloud_2_fpfh,
) = prepare_dataset(voxel_size=voxel_size_fgr)
color_1 = [0.9450980392, 0.5764705882, 0.7098039216]
color_2 = [0.11, 0.72, 0.89]
cloud_1.paint_uniform_color(color_1)
cloud_2.paint_uniform_color(color_2)
cloud_1_down.paint_uniform_color(color_1)
cloud_2_down.paint_uniform_color(color_2)
# axis
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
# unit block
unit_block = o3dobj.get_o3d_unit_block_at_origin()
# Visualize point cloud
print("Initial preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1_down, cloud_2_down, axis, unit_block])
# FGR
transformation_fast = None
if args.fast_cache != "":
if osp.exists(args.fast_cache):
print("Loading fast global registration cache from: ", args.fast_cache)
transformation_fast = np.load(args.fast_cache, allow_pickle=True)
if transformation_fast is None:
print(
"Fast global registration cache not found. Running fast global registration..."
)
start = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_fast = execute_fast_global_registration(
cloud_1_down, cloud_2_down, cloud_1_fpfh, cloud_2_fpfh, voxel_size_fgr
)
print(f"Fast global registration took {(time.time() - start):.3f} sec.\n")
print(result_fast)
transformation_fast = result_fast.transformation
np.save("registration_fgr.npy", transformation_fast)
cloud_1.transform(transformation_fast)
# Visualize point cloud
print("FGR preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1, cloud_2, axis, unit_block])
# Vanilla ICP
if not args.skip_icp:
(
_,
_,
cloud_1_down,
cloud_2_down,
_,
_,
) = prepare_dataset(voxel_size=voxel_size_icp)
cloud_1_down.transform(transformation_fast)
transformation_icp = None
if args.icp_cache != "":
if osp.exists(args.icp_cache):
print("Loading icp cache from: ", args.icp_cache)
transformation_icp = np.load(args.icp_cache, allow_pickle=True)
if transformation_icp is None:
print("ICP cache not found. Running ICP...")
s = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_icp = execute_vanilla_icp(cloud_1_down, cloud_2_down)
icp_time = time.time() - s
print("Time taken by ICP: ", icp_time)
print("Inlier Fitness: ", result_icp.fitness)
print("Inlier RMSE: ", result_icp.inlier_rmse)
transformation_icp = result_icp.transformation
np.save("registration_icp.npy", transformation_icp)
cloud_1.transform(transformation_icp)
else:
transformation_icp = np.identity(4)
if trajectory_file_path_1 != "":
# trajectory
points_1, timestamps_1 = io.load_coordinates_and_timestamps(
trajectory_file_path_1
)
# transformation | points_1 = tfm.transform_trajectory(points_1, transformation_fast) | 2 | 2023-12-08 19:52:08+00:00 | 4k |
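The expected completion applies the estimated transform to the loaded trajectory via tfm.transform_trajectory. A minimal NumPy sketch of the underlying operation, composing the coarse FGR result with the ICP refinement and applying it to N x 3 points in homogeneous coordinates; the helper name is illustrative, not the repository's API:

import numpy as np

def apply_rigid_transform(points_xyz: np.ndarray, T: np.ndarray) -> np.ndarray:
    # points_xyz: (N, 3), T: (4, 4) homogeneous transform
    homo = np.hstack([points_xyz, np.ones((points_xyz.shape[0], 1))])
    return (homo @ T.T)[:, :3]

T_fgr = np.eye(4)                     # coarse alignment (fast global registration)
T_icp = np.eye(4)                     # refinement (point-to-plane ICP)
T_total = T_icp @ T_fgr               # refinement is applied after the coarse step
trajectory = apply_rigid_transform(np.random.rand(100, 3), T_total)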
KAIST-VICLab/From_Ground_To_Objects | networks/depth_decoder.py | [
{
"identifier": "ConvBlock",
"path": "networks/layers.py",
"snippet": "class ConvBlock(nn.Module):\r\n \"\"\"Layer to perform a convolution followed by ELU\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels):\r\n super(ConvBlock, self).__init__()\r\n\r\n self.conv = Conv3x3(in_channels, out_channels)\r\n self.nonlin = nn.ELU(inplace=True)\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n out = self.nonlin(out)\r\n return out\r"
},
{
"identifier": "Conv3x3",
"path": "networks/layers.py",
"snippet": "class Conv3x3(nn.Module):\r\n \"\"\"Layer to pad and convolve input\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels, use_refl=True):\r\n super(Conv3x3, self).__init__()\r\n\r\n if use_refl:\r\n self.pad = nn.ReflectionPad2d(1)\r\n else:\r\n self.pad = nn.ZeroPad2d(1)\r\n self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)\r\n\r\n def forward(self, x):\r\n out = self.pad(x)\r\n out = self.conv(out)\r\n return out\r"
},
{
"identifier": "upsample",
"path": "networks/layers.py",
"snippet": "def upsample(x):\r\n \"\"\"Upsample input tensor by a factor of 2\r\n \"\"\"\r\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")\r"
},
{
"identifier": "disp_to_depth",
"path": "networks/layers.py",
"snippet": "def disp_to_depth(disp, min_depth, max_depth):\r\n \"\"\"Convert network's sigmoid output into depth prediction\r\n The formula for this conversion is given in the 'additional considerations'\r\n section of the paper.\r\n \"\"\"\r\n min_disp = 1 / max_depth\r\n max_disp = 1 / min_depth\r\n scaled_disp = min_disp + (max_disp - min_disp) * disp\r\n depth = 1 / scaled_disp\r\n\r\n return scaled_disp, depth\r"
},
{
"identifier": "coords_to_normals",
"path": "networks/layers.py",
"snippet": "def coords_to_normals(coords):\r\n \"\"\"Calculate surface normals using first order finite-differences.\r\n https://github.com/voyleg/perceptual-depth-sr/\r\n Parameters\r\n ----------\r\n coords : array_like\r\n Coordinates of the points (**, 3, h, w).\r\n Returns\r\n -------\r\n normals : torch.Tensor\r\n Surface normals (**, 3, h, w).\r\n \"\"\"\r\n coords = torch.as_tensor(coords)\r\n if coords.ndim < 4:\r\n coords = coords[None]\r\n\r\n dxdu = coords[..., 0, :, 1:] - coords[..., 0, :, :-1]\r\n dydu = coords[..., 1, :, 1:] - coords[..., 1, :, :-1]\r\n dzdu = coords[..., 2, :, 1:] - coords[..., 2, :, :-1]\r\n dxdv = coords[..., 0, 1:, :] - coords[..., 0, :-1, :]\r\n dydv = coords[..., 1, 1:, :] - coords[..., 1, :-1, :]\r\n dzdv = coords[..., 2, 1:, :] - coords[..., 2, :-1, :]\r\n\r\n dxdu = torch.nn.functional.pad(dxdu, (0, 1), mode='replicate')\r\n dydu = torch.nn.functional.pad(dydu, (0, 1), mode='replicate')\r\n dzdu = torch.nn.functional.pad(dzdu, (0, 1), mode='replicate')\r\n\r\n # pytorch cannot just do `dxdv = torch.nn.functional.pad(dxdv, (0, 0, 0, 1), mode='replicate')`, so\r\n dxdv = torch.cat([dxdv, dxdv[..., -1:, :]], dim=-2)\r\n dydv = torch.cat([dydv, dydv[..., -1:, :]], dim=-2)\r\n dzdv = torch.cat([dzdv, dzdv[..., -1:, :]], dim=-2)\r\n\r\n n_x = dydv * dzdu - dydu * dzdv\r\n n_y = dzdv * dxdu - dzdu * dxdv\r\n n_z = dxdv * dydu - dxdu * dydv\r\n\r\n n = torch.stack([n_x, n_y, n_z], dim=-3)\r\n n = torch.nn.functional.normalize(n, dim=-3)\r\n return n\r"
}
] | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from .layers import ConvBlock, Conv3x3, upsample, disp_to_depth, coords_to_normals
from timm.models.layers import trunc_normal_
from .cadepth import SPM, DEM | 1,801 | # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
class DepthDecoder(nn.Module):
def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):
super(DepthDecoder, self).__init__()
self.num_output_channels = num_output_channels
self.use_skips = use_skips
self.upsample_mode = 'nearest'
self.scales = scales
self.opt = opt
self.num_ch_enc = num_ch_enc
self.num_ch_dec = np.array([16, 32, 64, 128, 256])
self.backproject_depth = backproject_depth
self.min_depth = min_depth
self.max_depth = max_depth
# decoder
self.convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
if self.opt["use_surface_normal"] and i != 4:
num_ch_in += 3
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
# upconv_1
num_ch_in = self.num_ch_dec[i]
if self.use_skips and i > 0:
num_ch_in += self.num_ch_enc[i - 1]
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
if self.opt['cadepth']:
self.convs[("dem", i)] = DEM(num_ch_in)
for s in self.scales:
self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
if self.opt['cadepth']:
self.spm = SPM(self.num_ch_enc[-1])
self.decoder = nn.ModuleList(list(self.convs.values()))
self.sigmoid = nn.Sigmoid()
def disp_to_surface_normal(self, disp, inputs, scale):
# convert the disparity map into a scale-invariant surface normal map
B, _, H, W = disp.shape
_, depth = disp_to_depth(disp, self.min_depth, self.max_depth)
coords = self.backproject_depth[scale](depth, inputs[('inv_K', scale)])
normals = coords_to_normals(coords[:, :3].view(-1, 3, H, W))
return (normals + 1) / 2
def forward(self, input_features, inputs=None):
self.outputs = {}
# decoder
x = input_features[-1]
if self.opt["cadepth"]:
x = self.spm(x)
for i in range(4, -1, -1):
x = self.convs[("upconv", i, 0)](x)
| # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
class DepthDecoder(nn.Module):
def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):
super(DepthDecoder, self).__init__()
self.num_output_channels = num_output_channels
self.use_skips = use_skips
self.upsample_mode = 'nearest'
self.scales = scales
self.opt = opt
self.num_ch_enc = num_ch_enc
self.num_ch_dec = np.array([16, 32, 64, 128, 256])
self.backproject_depth = backproject_depth
self.min_depth = min_depth
self.max_depth = max_depth
# decoder
self.convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
if self.opt["use_surface_normal"] and i != 4:
num_ch_in += 3
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
# upconv_1
num_ch_in = self.num_ch_dec[i]
if self.use_skips and i > 0:
num_ch_in += self.num_ch_enc[i - 1]
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
if self.opt['cadepth']:
self.convs[("dem", i)] = DEM(num_ch_in)
for s in self.scales:
self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
if self.opt['cadepth']:
self.spm = SPM(self.num_ch_enc[-1])
self.decoder = nn.ModuleList(list(self.convs.values()))
self.sigmoid = nn.Sigmoid()
def disp_to_surface_normal(self, disp, inputs, scale):
# convert the disparity map into a scale-invariant surface normal map
B, _, H, W = disp.shape
_, depth = disp_to_depth(disp, self.min_depth, self.max_depth)
coords = self.backproject_depth[scale](depth, inputs[('inv_K', scale)])
normals = coords_to_normals(coords[:, :3].view(-1, 3, H, W))
return (normals + 1) / 2
def forward(self, input_features, inputs=None):
self.outputs = {}
# decoder
x = input_features[-1]
if self.opt["cadepth"]:
x = self.spm(x)
for i in range(4, -1, -1):
x = self.convs[("upconv", i, 0)](x) | x = [upsample(x)] | 2 | 2023-12-12 08:29:30+00:00 | 4k |
marc-rigter/polygrad-world-models | polygrad/models/diffusion.py | [
{
"identifier": "default_sample_fn",
"path": "polygrad/sampling/functions.py",
"snippet": "@torch.no_grad()\ndef default_sample_fn(model, x, act, cond, t, q_sample, condition_noise_scale, policy, normalizer):\n timesteps = make_timesteps(x.shape[0], t, x.device)\n\n # rescale actions\n act_scale = q_sample(act, timesteps, noise=torch.randn_like(act) * condition_noise_scale)\n model_mean, _, model_log_variance = model.p_mean_variance(x=x, act=act_scale, t=timesteps)\n model_std = torch.exp(0.5 * model_log_variance)\n\n # no noise when t == 0\n noise = torch.randn_like(x)\n noise[timesteps == 0] = 0\n return model_mean + model_std * noise, None, 0.0"
},
{
"identifier": "policy_guided_sample_fn",
"path": "polygrad/sampling/functions.py",
"snippet": "def policy_guided_sample_fn(\n model,\n x,\n act_noisy,\n cond,\n t,\n q_sample,\n policy,\n normalizer,\n condition_noise_scale=0.0,\n guidance_scale=1.0,\n action_noise_scale=1.0,\n clip_std=None,\n states_for_guidance='recon',\n update_states=False,\n guidance_type='grad',\n clip_state_change=1.0,\n ):\n ''' Compute new sample after one step of denoising by diffusion model with policy guidance. \n '''\n assert guidance_type in ['grad', 'sample', 'none']\n timesteps = make_timesteps(x.shape[0], t, x.device)\n\n # compute predicted denoised trajectory\n with torch.no_grad():\n act_scale = q_sample(act_noisy, timesteps, noise=torch.zeros_like(act_noisy))\n with torch.autocast(device_type=\"cuda\", dtype=torch.float16):\n prediction = model.model(x, act_scale, timesteps)\n x_recon = model.predict_start_from_noise(x, t=timesteps, noise=prediction)\n x_recon = apply_conditioning(x_recon, cond, model.observation_dim)\n\n model_mean, _, model_log_variance = model.q_posterior(x_start=x_recon, x_t=x, t=timesteps)\n model_std = torch.exp(0.5 * model_log_variance)\n noise = torch.randn_like(x)\n\n # clip magnitude of change near end of diffusion\n if t <= 10:\n model_mean = clip_change(x, model_mean, clip_state_change)\n \n if states_for_guidance == 'recon':\n guide_states = x_recon[:, :, :model.observation_dim].detach()\n elif states_for_guidance == 'posterior_mean':\n guide_states = model_mean[:, :, :model.observation_dim].detach()\n else:\n raise NotImplementedError\n\n # no guidance when t == 0\n if t == 0:\n if clip_std is not None:\n act_noisy_unnormed = normalizer.unnormalize(act_noisy, \"actions\")\n policy_dist = policy(guide_states, normed_input=True)\n act_noisy_unnormed = torch.clamp(\n act_noisy_unnormed,\n min=policy_dist.mean-clip_std*policy_dist.stddev,\n max=policy_dist.mean+clip_std*policy_dist.stddev)\n act_noisy = normalizer.normalize(act_noisy_unnormed, \"actions\")\n metrics = {\n \"avg_change\": (model_mean - x).abs().mean().item(),\n \"max_change\": (model_mean - x).abs().max().item(),\n \"min_change\": (model_mean - x).abs().min().item(),\n \"std_change\": (model_mean - x).abs().std().item(),\n }\n return model_mean, act_noisy, metrics\n\n if guidance_type == 'grad':\n # unnormalize as policy ouputs unnormalized actions\n act_noisy_unnormed = normalizer.unnormalize(act_noisy, \"actions\")\n\n # compute policy distribution at denoised states\n with torch.no_grad():\n policy_dist = policy(guide_states, normed_input=True)\n\n if clip_std is not None:\n act_noisy_unnormed = torch.clamp(\n act_noisy_unnormed,\n min=policy_dist.mean-clip_std*policy_dist.stddev,\n max=policy_dist.mean+clip_std*policy_dist.stddev)\n \n # if not act_noisy_unnormed.requires_grad:\n act_noisy_unnormed.requires_grad = True\n\n # backprop likelihood of actions in predicted trajectory under policy\n act_logprob = policy_dist.log_prob(act_noisy_unnormed)\n loss = act_logprob.sum()\n loss.backward()\n \n # gradient update to actions\n act_grad = act_noisy_unnormed.grad.detach()\n act_noisy_unnormed = (act_noisy_unnormed + guidance_scale * act_grad).detach()\n\n # gradient update to states\n if update_states:\n guide_states.requires_grad = True\n policy_dist = policy(guide_states, normed_input=True)\n act_logprob = policy_dist.log_prob(act_noisy_unnormed)\n loss = act_logprob.sum()\n loss.backward()\n obs_grad = guide_states.grad.detach()\n obs_recon = (guide_states + guidance_scale * obs_grad).detach()\n x_recon[:, :, :model.observation_dim] = obs_recon\n x_recon = apply_conditioning(x_recon, 
cond, model.observation_dim)\n model_mean, _, model_log_variance = model.q_posterior(x_start=x_recon, x_t=x, t=timesteps)\n \n # normalize actions\n act_denoised = normalizer.normalize(act_noisy_unnormed, \"actions\")\n act_sample = act_denoised + action_noise_scale * model_std * torch.randn_like(act_denoised)\n\n elif guidance_type == 'sample':\n with torch.no_grad():\n policy_dist = policy(guide_states, normed_input=True)\n act_sample_unnormed = policy_dist.sample()\n act_sample = normalizer.normalize(act_sample_unnormed, \"actions\")\n \n elif guidance_type == 'none':\n act_sample = act_noisy\n\n return model_mean + model_std * noise, act_sample, 0.0"
},
{
"identifier": "cosine_beta_schedule",
"path": "polygrad/models/helpers.py",
"snippet": "class SinusoidalPosEmb(nn.Module):\nclass Downsample1d(nn.Module):\nclass Upsample1d(nn.Module):\nclass Conv1dBlock(nn.Module):\nclass Residual(nn.Module):\nclass LayerNorm(nn.Module):\nclass PreNorm(nn.Module):\nclass LinearAttention(nn.Module):\nclass WeightedLoss(nn.Module):\nclass ValueLoss(nn.Module):\nclass WeightedL1(WeightedLoss):\nclass WeightedL2(WeightedLoss):\nclass ValueL1(ValueLoss):\nclass ValueL2(ValueLoss):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8):\n def forward(self, x):\n def __init__(self, fn):\n def forward(self, x, *args, **kwargs):\n def __init__(self, dim, eps = 1e-5):\n def forward(self, x):\n def __init__(self, dim, fn):\n def forward(self, x):\n def __init__(self, dim, heads=4, dim_head=32):\n def forward(self, x):\ndef extract(a, t, x_shape):\ndef cosine_beta_schedule(timesteps, tau=1.0, start=0.0, end=1, dtype=torch.float32):\ndef linear_beta_schedule(timesteps):\ndef apply_conditioning(x, conditions, obs_dim):\n def __init__(self, weights):\n def forward(self, pred, targ):\n def __init__(self, *args):\n def forward(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):"
}
] | from collections import namedtuple
from torch import nn
from polygrad.sampling.functions import default_sample_fn, policy_guided_sample_fn
from .helpers import (
cosine_beta_schedule,
extract,
apply_conditioning,
Losses,
)
import numpy as np
import torch
import pdb
import torch.nn.functional as F
import polygrad.utils as utils | 3,426 | action_weight=1.0, loss_discount=1.0, loss_weights=None,
noise_sched_tau=1.0, mask_obs=False, max_prediction_weight=1.0,
temporal_loss_weight=1.0, action_condition_noise_scale=1.0,
):
super().__init__()
self.horizon = horizon
self.observation_dim = observation_dim
self.action_dim = action_dim
self.transition_dim = observation_dim + 2 # obs + reward + terminals
self.model = model
self.action_condition_noise_scale = action_condition_noise_scale
betas = cosine_beta_schedule(n_timesteps, tau=noise_sched_tau)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, axis=0)
alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])
self.n_timesteps = int(n_timesteps)
self.clip_denoised = clip_denoised
self.predict_epsilon = predict_epsilon
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
self.register_buffer('posterior_variance', posterior_variance)
## log calculation clipped because the posterior variance
## is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped',
torch.log(torch.clamp(posterior_variance, min=1e-20)))
coef1 = betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)
coef2 = (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)
self.register_buffer('posterior_mean_coef1', coef1)
self.register_buffer('posterior_mean_coef2', coef2)
## get loss coefficients and initialize objective
loss_weights = torch.linspace(temporal_loss_weight, 1 / temporal_loss_weight, horizon)
loss_weights = loss_weights[None, :, None]
self.loss_fn = Losses[loss_type](loss_weights)
def get_loss_weights(self, action_weight, discount, weights_dict):
'''
sets loss coefficients for trajectory
action_weight : float
coefficient on first action loss
discount : float
multiplies t^th timestep of trajectory loss by discount**t
weights_dict : dict
{ i: c } multiplies dimension i of observation loss by c
'''
self.action_weight = action_weight
dim_weights = torch.ones(self.transition_dim, dtype=torch.float32)
## set loss coefficients for dimensions of observation
if weights_dict is None: weights_dict = {}
for ind, w in weights_dict.items():
dim_weights[ind] *= w
## decay loss with trajectory timestep: discount**t
discounts = discount ** torch.arange(self.horizon, dtype=torch.float)
discounts = discounts / discounts.mean()
loss_weights = torch.einsum('h,t->ht', discounts, dim_weights)
## manually set a0 weight
loss_weights[0, (-1 - self.action_dim):-1] = action_weight
return loss_weights
#------------------------------------------ sampling ------------------------------------------#
def predict_start_from_noise(self, x_t, t, noise):
'''
if self.predict_epsilon, model output is (scaled) noise;
otherwise, model predicts x0 directly
'''
if self.predict_epsilon:
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
else:
return noise
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, act, t):
with torch.autocast(device_type="cuda", dtype=torch.float16):
prediction = self.model(x, act, t)
x_recon = self.predict_start_from_noise(x, t=t, noise=prediction)
if self.clip_denoised:
x_recon.clamp_(-1., 1.)
else:
raise RuntimeError()
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
def p_sample_loop(self, shape, cond, act=None, normalizer=None, policy=None, return_sequence=False, verbose=True, return_chain=False, **sample_kwargs):
if policy is None:
|
Sample = namedtuple('Sample', 'trajectories chains recons_after_guide recons_before_guide')
def sort_by_values(x, values):
inds = torch.argsort(values, descending=True)
x = x[inds]
values = values[inds]
return x, values
def make_timesteps(batch_size, i, device):
t = torch.full((batch_size,), i, device=device, dtype=torch.long)
return t
class GaussianDiffusion(nn.Module):
def __init__(self, model, horizon, observation_dim, action_dim, n_timesteps=1000,
loss_type='l1', clip_denoised=False, predict_epsilon=True,
action_weight=1.0, loss_discount=1.0, loss_weights=None,
noise_sched_tau=1.0, mask_obs=False, max_prediction_weight=1.0,
temporal_loss_weight=1.0, action_condition_noise_scale=1.0,
):
super().__init__()
self.horizon = horizon
self.observation_dim = observation_dim
self.action_dim = action_dim
self.transition_dim = observation_dim + 2 # obs + reward + terminals
self.model = model
self.action_condition_noise_scale = action_condition_noise_scale
betas = cosine_beta_schedule(n_timesteps, tau=noise_sched_tau)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, axis=0)
alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])
self.n_timesteps = int(n_timesteps)
self.clip_denoised = clip_denoised
self.predict_epsilon = predict_epsilon
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
self.register_buffer('posterior_variance', posterior_variance)
## log calculation clipped because the posterior variance
## is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped',
torch.log(torch.clamp(posterior_variance, min=1e-20)))
coef1 = betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)
coef2 = (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)
self.register_buffer('posterior_mean_coef1', coef1)
self.register_buffer('posterior_mean_coef2', coef2)
## get loss coefficients and initialize objective
loss_weights = torch.linspace(temporal_loss_weight, 1 / temporal_loss_weight, horizon)
loss_weights = loss_weights[None, :, None]
self.loss_fn = Losses[loss_type](loss_weights)
def get_loss_weights(self, action_weight, discount, weights_dict):
'''
sets loss coefficients for trajectory
action_weight : float
coefficient on first action loss
discount : float
multiplies t^th timestep of trajectory loss by discount**t
weights_dict : dict
{ i: c } multiplies dimension i of observation loss by c
'''
self.action_weight = action_weight
dim_weights = torch.ones(self.transition_dim, dtype=torch.float32)
## set loss coefficients for dimensions of observation
if weights_dict is None: weights_dict = {}
for ind, w in weights_dict.items():
dim_weights[ind] *= w
## decay loss with trajectory timestep: discount**t
discounts = discount ** torch.arange(self.horizon, dtype=torch.float)
discounts = discounts / discounts.mean()
loss_weights = torch.einsum('h,t->ht', discounts, dim_weights)
## manually set a0 weight
loss_weights[0, (-1 - self.action_dim):-1] = action_weight
return loss_weights
#------------------------------------------ sampling ------------------------------------------#
def predict_start_from_noise(self, x_t, t, noise):
'''
if self.predict_epsilon, model output is (scaled) noise;
otherwise, model predicts x0 directly
'''
if self.predict_epsilon:
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
else:
return noise
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, act, t):
with torch.autocast(device_type="cuda", dtype=torch.float16):
prediction = self.model(x, act, t)
x_recon = self.predict_start_from_noise(x, t=t, noise=prediction)
if self.clip_denoised:
x_recon.clamp_(-1., 1.)
else:
            raise RuntimeError()
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
def p_sample_loop(self, shape, cond, act=None, normalizer=None, policy=None, return_sequence=False, verbose=True, return_chain=False, **sample_kwargs):
if policy is None: | sample_fn = default_sample_fn | 0 | 2023-12-12 21:05:26+00:00 | 4k |
zhongpei/Qwen-SDXL-Turbo | web_demo.py | [
{
"identifier": "start_server",
"path": "file_server.py",
"snippet": "def start_server(server_port):\n # 在单独的线程中启动服务器\n server_thread = threading.Thread(target=_start_server, args=(server_port,))\n server_thread.daemon = True # 设置为守护线程,这样当主程序退出时,服务器线程也会退出\n server_thread.start()"
},
{
"identifier": "get_local_ip",
"path": "file_server.py",
"snippet": "def get_local_ip():\n \"\"\" 获取当前计算机在局域网中的 IP 地址 \"\"\"\n try:\n # 建立一个临时的连接,以便获取本地网络接口的IP地址\n # 这里的 'www.baidu.com' 是Google的公共DNS服务器,端口为80\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"www.baidu.com\", 80))\n local_ip = s.getsockname()[0]\n s.close()\n return local_ip\n except Exception as e:\n print(\"获取本地IP地址时出错:\", e)\n return None"
}
] | import os
import gradio as gr
import mdtex2html
import piexif
import os
import torch
import json
import time
import datetime
import config as conf
import gc
from argparse import ArgumentParser
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from diffusers import AutoPipelineForText2Image
from file_server import start_server, get_local_ip | 3,109 | task_history = gr.State([])
with gr.Row():
with gr.Column(scale=1, min_width=600):
image = gr.Image(type="pil")
query = gr.Textbox(lines=2, label='Input')
with gr.Row():
empty_btn = gr.Button("🧹 Clear History (清除历史)")
submit_btn = gr.Button("🚀 Submit (生成)")
regen_btn = gr.Button("🤔️ Regenerate (重试)")
image_btn = gr.Button("🎨 Image (生成)")
talk_btn = gr.Button("💬 Talk (聊天)")
with gr.Column(scale=1, min_width=600):
with gr.Tab(label="Qwen"):
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.9,
label="Temperature",
info="越小越遵循输入,越大越充满想象"
)
prompt_system_radio = gr.Radio(
["中英文翻译", "文言文", "画家", "剧情", "AI助手"],
label='角色',
info="根据输入选择合适的角色"
)
with gr.Row():
prompt_system = gr.Textbox(
lines=1,
label='System Template',
value="你擅长翻译中文到英语。"
)
prompt_template = gr.Textbox(
lines=1,
label='Prompt Template',
value="必须使用英语根据主题描述一副画面:"
)
chatbot = gr.Chatbot(label='Qwen-Chat', elem_classes="control-height")
with gr.Tab(label="Config"):
with gr.Row():
top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Top-p")
top_k = gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top-k")
max_new_tokens = gr.Slider(minimum=1, maximum=1024, step=1, value=77, label="Max New Tokens")
repetition_penalty = gr.Slider(
minimum=1.0,
maximum=2.0,
step=0.01,
value=1.1,
label="repetition penalty",
info="重复惩罚"
)
aspect_ratios_selection = gr.Radio(
label='Aspect Ratios',
choices=conf.available_aspect_ratios,
value=conf.default_aspect_ratio,
info='width × height',
elem_classes='aspect_ratios'
)
num_inference_steps = gr.Slider(minimum=1, maximum=60, step=1, value=16, label="Image Steps")
with gr.Tab(label="History"):
file_server = f"http://{get_local_ip()}:{args.file_server_port}/"
html_file_path = f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html"
html_fns = [fn for fn in os.listdir(OUTPUT_HTML_DIR) if fn.endswith(".html")]
gr.Markdown(f'<a href="{file_server}{html_file_path}" target="_blank">{html_file_path}</a>')
for fn in html_fns:
if fn == html_file_path:
continue
gr.Markdown(f'<a href="{file_server}{fn}" target="_blank">{fn}</a>')
PROMPT_SYSTEM_DICT = {
"中英文翻译": "你擅长翻译中文到英语。",
"文言文": "你擅长文言文翻译为英语。",
"画家": "你是绘画大师,擅长描绘画面细节。",
"剧情": "你是剧作家,擅长创作连续的漫画脚本。",
"AI助手": "You are a helpful assistant",
}
prompt_system_radio.change(lambda val: (PROMPT_SYSTEM_DICT[val]),
inputs=[prompt_system_radio], outputs=[prompt_system])
temperature.change(lambda val: config.update(temperature=val), inputs=[temperature], outputs=[])
top_k.change(lambda val: config.update(top_k=val), inputs=[top_k], outputs=[])
top_p.change(lambda val: config.update(top_p=val), inputs=[top_p], outputs=[])
max_new_tokens.change(
lambda val: config.update(max_new_tokens=val),
inputs=[max_new_tokens],
outputs=[],
)
repetition_penalty.change(
lambda val: config.update(repetition_penalty=val),
inputs=[repetition_penalty],
outputs=[],
)
talk_btn.click(predict, [query, chatbot, task_history, prompt_system], [chatbot],
show_progress=True)
submit_btn.click(predict, [query, chatbot, task_history, prompt_system, prompt_template], [chatbot],
show_progress=True)
submit_btn.click(reset_user_input, [], [query])
empty_btn.click(reset_state, [chatbot, task_history], outputs=[chatbot], show_progress=True)
image_btn.click(draw_image, [chatbot, task_history, aspect_ratios_selection, num_inference_steps],
outputs=[image],
show_progress=True)
regen_btn.click(regenerate, [chatbot, task_history, prompt_system], [chatbot], show_progress=True)
demo.queue().launch(
share=args.share,
inbrowser=args.inbrowser,
server_port=args.server_port,
server_name=args.server_name,
)
def main():
args = _get_args()
| # Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""A simple web interactive chat demo based on gradio."""
DEFAULT_CKPT_PATH = 'hahahafofo/Qwen-1_8B-Stable-Diffusion-Prompt'
DEFAULT_SDXL_PATH = "Lykon/dreamshaper-xl-turbo" # "stabilityai/sdxl-turbo"
OUTPUT_IMAGES_DIR = "output_images"
OUTPUT_HTML_DIR = "output_html"
def _get_args():
parser = ArgumentParser()
parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
help="Checkpoint name or path, default to %(default)r")
parser.add_argument("-x", "--sdxl-path", type=str, default=DEFAULT_SDXL_PATH,
help="SDXL Checkpoint name or path, default to %(default)r")
parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
parser.add_argument("--qwen-only", action="store_true", help="Run demo with qwen only")
parser.add_argument("--share", action="store_true", default=False,
help="Create a publicly shareable link for the interface.")
parser.add_argument("--inbrowser", action="store_true", default=False,
help="Automatically launch the interface in a new tab on the default browser.")
parser.add_argument("--server-port", type=int, default=8000,
help="Demo server port.")
parser.add_argument("--server-name", type=str, default="0.0.0.0",
help="Demo server name.")
parser.add_argument("--file-server-port", type=int, default=8001,
help="file server port.")
args = parser.parse_args()
return args
def _load_model_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path, trust_remote_code=True, resume_download=True,
)
if args.cpu_only:
device_map = "cpu"
else:
device_map = "auto"
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
device_map=device_map,
trust_remote_code=True,
resume_download=True,
).eval()
config = GenerationConfig.from_pretrained(
args.checkpoint_path,
trust_remote_code=True,
resume_download=True,
)
config.max_new_tokens = 77
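    # 77 presumably mirrors the 77-token context window of the CLIP text encoders used by SDXL,
    # so generated prompts are unlikely to be truncated downstream (assumption, not documented here)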
return model, tokenizer, config
def _load_sdxl_turbo(args):
pipe = AutoPipelineForText2Image.from_pretrained(
args.sdxl_path,
torch_dtype=torch.float16,
variant="fp16"
)
pipe.to("cuda")
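    # variant="fp16" fetches the half-precision weights and torch_dtype=torch.float16 keeps
    # inference in fp16, roughly halving VRAM use compared to fp32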
return pipe
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert(message),
None if response is None else mdtex2html.convert(response),
)
return y
gr.Chatbot.postprocess = postprocess
def _save_image2html(image, query, prompt):
    # Encode the text info as JSON and store it in the EXIF metadata
exif_dict = {"0th": {}, "Exif": {}, "1st": {}, "thumbnail": None, "GPS": {}}
exif_dict["0th"][piexif.ImageIFD.ImageDescription] = json.dumps({"prompt": prompt})
exif_bytes = piexif.dump(exif_dict)
file_name = f"{int(time.time())}.png"
image_path = os.path.join(OUTPUT_IMAGES_DIR, file_name)
image.save(image_path, "PNG", exif=exif_bytes)
    # Build the HTML content
    # Initial HTML skeleton
html_start = """<!DOCTYPE html><html lang="zh"><head><meta charset="UTF-8">
<title>Image and Prompt History</title></head><body><h1>Image and Prompt History</h1><ul>"""
html_end = "</ul></body></html>"
    # Write the HTML content to a file
html_file_path = os.path.join(OUTPUT_HTML_DIR, f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html")
    # Create a new list item
new_list_item = f"""
<li>
<p>Prompt: {prompt}</p>
<p>Input: {query}</p>
<img src="{image_path}" alt="{image_path}" style="max-width: 100%; height: auto;">
</li>
"""
    # Read the existing HTML file
try:
with open(html_file_path, 'r', encoding='utf-8') as file:
existing_html = file.read()
except FileNotFoundError:
        # If the file does not exist, create a new HTML skeleton
existing_html = html_start + html_end
    # Insert the new list item before the closing list tag
updated_html = existing_html.replace(html_end, new_list_item + html_end)
    # Write the updated HTML back to the file
with open(html_file_path, 'w+', encoding='utf-8') as file:
file.write(updated_html)
return f"HTML content appended to {html_file_path}"
def _parse_text(text):
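    # escapes markdown/HTML-sensitive characters outside fenced code blocks so chat output is
    # rendered literally by the gradio Chatbot instead of being interpreted as markup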
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split("`")
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f"<br></code></pre>"
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", r"\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>" + line
text = "".join(lines)
return text
def _launch_demo(args, image_pipe, model, tokenizer, config):
def predict(
_query,
_chatbot,
_task_history,
_prompt_system: str = "You are a helpful assistant",
_prompt_template: str = ""
):
print(f"User: {_parse_text(_query)}")
_chatbot.append((_parse_text(_query), ""))
full_response = ""
_query = f"{_prompt_template}\n{_query}"
for response in model.chat_stream(
tokenizer,
_query,
history=_task_history,
generation_config=config,
system=_prompt_system
):
_chatbot[-1] = (_parse_text(_query), _parse_text(response))
yield _chatbot
full_response = _parse_text(response)
print(f"History: {_task_history}")
_task_history.append((_query, full_response))
print(f"Qwen-Chat: {_parse_text(full_response)}")
def draw_image(_chatbot, _task_history, aspect_ratios_selection, num_inference_steps, ):
if len(_task_history) == 0:
return
prompt = _task_history[-1][-1]
if len(prompt) == 0:
return
print(f"===\n{_chatbot} \n\n{_task_history} ====\n")
height = int(aspect_ratios_selection.split("*")[1])
width = int(aspect_ratios_selection.split("*")[0])
print(f"{prompt} {height} * {width} {num_inference_steps}")
image_pil = image_pipe(
prompt=prompt,
num_inference_steps=num_inference_steps,
guidance_scale=0.0,
height=height,
width=width,
).images[0]
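        # guidance_scale=0.0 disables classifier-free guidance, which is how turbo-distilled SDXL
        # checkpoints are meant to be sampled; they also need far fewer steps than standard SDXL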
_save_image2html(image_pil, query=_chatbot[-1][0], prompt=prompt)
return image_pil
def regenerate(_chatbot, _task_history, _prompt_system):
if not _task_history:
yield _chatbot
return
item = _task_history.pop(-1)
_chatbot.pop(-1)
yield from predict(item[0], _chatbot, _task_history, _prompt_template="", _prompt_system=_prompt_system)
def reset_user_input():
return gr.update(value="")
def reset_state(_chatbot, _task_history):
_task_history.clear()
_chatbot.clear()
gc.collect()
torch.cuda.empty_cache()
return _chatbot
with gr.Blocks() as demo:
task_history = gr.State([])
with gr.Row():
with gr.Column(scale=1, min_width=600):
image = gr.Image(type="pil")
query = gr.Textbox(lines=2, label='Input')
with gr.Row():
empty_btn = gr.Button("🧹 Clear History (清除历史)")
submit_btn = gr.Button("🚀 Submit (生成)")
regen_btn = gr.Button("🤔️ Regenerate (重试)")
image_btn = gr.Button("🎨 Image (生成)")
talk_btn = gr.Button("💬 Talk (聊天)")
with gr.Column(scale=1, min_width=600):
with gr.Tab(label="Qwen"):
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.9,
label="Temperature",
info="越小越遵循输入,越大越充满想象"
)
prompt_system_radio = gr.Radio(
["中英文翻译", "文言文", "画家", "剧情", "AI助手"],
label='角色',
info="根据输入选择合适的角色"
)
with gr.Row():
prompt_system = gr.Textbox(
lines=1,
label='System Template',
value="你擅长翻译中文到英语。"
)
prompt_template = gr.Textbox(
lines=1,
label='Prompt Template',
value="必须使用英语根据主题描述一副画面:"
)
chatbot = gr.Chatbot(label='Qwen-Chat', elem_classes="control-height")
with gr.Tab(label="Config"):
with gr.Row():
top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Top-p")
top_k = gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top-k")
max_new_tokens = gr.Slider(minimum=1, maximum=1024, step=1, value=77, label="Max New Tokens")
repetition_penalty = gr.Slider(
minimum=1.0,
maximum=2.0,
step=0.01,
value=1.1,
label="repetition penalty",
info="重复惩罚"
)
aspect_ratios_selection = gr.Radio(
label='Aspect Ratios',
choices=conf.available_aspect_ratios,
value=conf.default_aspect_ratio,
info='width × height',
elem_classes='aspect_ratios'
)
num_inference_steps = gr.Slider(minimum=1, maximum=60, step=1, value=16, label="Image Steps")
with gr.Tab(label="History"):
file_server = f"http://{get_local_ip()}:{args.file_server_port}/"
html_file_path = f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html"
html_fns = [fn for fn in os.listdir(OUTPUT_HTML_DIR) if fn.endswith(".html")]
gr.Markdown(f'<a href="{file_server}{html_file_path}" target="_blank">{html_file_path}</a>')
for fn in html_fns:
if fn == html_file_path:
continue
gr.Markdown(f'<a href="{file_server}{fn}" target="_blank">{fn}</a>')
PROMPT_SYSTEM_DICT = {
"中英文翻译": "你擅长翻译中文到英语。",
"文言文": "你擅长文言文翻译为英语。",
"画家": "你是绘画大师,擅长描绘画面细节。",
"剧情": "你是剧作家,擅长创作连续的漫画脚本。",
"AI助手": "You are a helpful assistant",
}
prompt_system_radio.change(lambda val: (PROMPT_SYSTEM_DICT[val]),
inputs=[prompt_system_radio], outputs=[prompt_system])
temperature.change(lambda val: config.update(temperature=val), inputs=[temperature], outputs=[])
top_k.change(lambda val: config.update(top_k=val), inputs=[top_k], outputs=[])
top_p.change(lambda val: config.update(top_p=val), inputs=[top_p], outputs=[])
max_new_tokens.change(
lambda val: config.update(max_new_tokens=val),
inputs=[max_new_tokens],
outputs=[],
)
repetition_penalty.change(
lambda val: config.update(repetition_penalty=val),
inputs=[repetition_penalty],
outputs=[],
)
talk_btn.click(predict, [query, chatbot, task_history, prompt_system], [chatbot],
show_progress=True)
submit_btn.click(predict, [query, chatbot, task_history, prompt_system, prompt_template], [chatbot],
show_progress=True)
submit_btn.click(reset_user_input, [], [query])
empty_btn.click(reset_state, [chatbot, task_history], outputs=[chatbot], show_progress=True)
image_btn.click(draw_image, [chatbot, task_history, aspect_ratios_selection, num_inference_steps],
outputs=[image],
show_progress=True)
regen_btn.click(regenerate, [chatbot, task_history, prompt_system], [chatbot], show_progress=True)
demo.queue().launch(
share=args.share,
inbrowser=args.inbrowser,
server_port=args.server_port,
server_name=args.server_name,
)
def main():
args = _get_args() | start_server(server_port=args.file_server_port) | 0 | 2023-12-06 06:04:29+00:00 | 4k |
jinxixiang/magic_animate_unofficial | animatediff/magic_animate/controlnet.py | [
{
"identifier": "TimestepEmbedding",
"path": "animatediff/magic_animate/embeddings.py",
"snippet": "class TimestepEmbedding(nn.Module):\n def __init__(\n self,\n in_channels: int,\n time_embed_dim: int,\n act_fn: str = \"silu\",\n out_dim: int = None,\n post_act_fn: Optional[str] = None,\n cond_proj_dim=None,\n ):\n super().__init__()\n\n self.linear_1 = nn.Linear(in_channels, time_embed_dim)\n\n if cond_proj_dim is not None:\n self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)\n else:\n self.cond_proj = None\n\n if act_fn == \"silu\":\n self.act = nn.SiLU()\n elif act_fn == \"mish\":\n self.act = nn.Mish()\n elif act_fn == \"gelu\":\n self.act = nn.GELU()\n else:\n raise ValueError(f\"{act_fn} does not exist. Make sure to define one of 'silu', 'mish', or 'gelu'\")\n\n if out_dim is not None:\n time_embed_dim_out = out_dim\n else:\n time_embed_dim_out = time_embed_dim\n self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)\n\n if post_act_fn is None:\n self.post_act = None\n elif post_act_fn == \"silu\":\n self.post_act = nn.SiLU()\n elif post_act_fn == \"mish\":\n self.post_act = nn.Mish()\n elif post_act_fn == \"gelu\":\n self.post_act = nn.GELU()\n else:\n raise ValueError(f\"{post_act_fn} does not exist. Make sure to define one of 'silu', 'mish', or 'gelu'\")\n\n def forward(self, sample, condition=None):\n if condition is not None:\n sample = sample + self.cond_proj(condition)\n sample = self.linear_1(sample)\n\n if self.act is not None:\n sample = self.act(sample)\n\n sample = self.linear_2(sample)\n\n if self.post_act is not None:\n sample = self.post_act(sample)\n return sample"
},
{
"identifier": "Timesteps",
"path": "animatediff/magic_animate/embeddings.py",
"snippet": "class Timesteps(nn.Module):\n def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):\n super().__init__()\n self.num_channels = num_channels\n self.flip_sin_to_cos = flip_sin_to_cos\n self.downscale_freq_shift = downscale_freq_shift\n\n def forward(self, timesteps):\n t_emb = get_timestep_embedding(\n timesteps,\n self.num_channels,\n flip_sin_to_cos=self.flip_sin_to_cos,\n downscale_freq_shift=self.downscale_freq_shift,\n )\n return t_emb"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from .embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import (
CrossAttnDownBlock2D,
DownBlock2D,
UNetMidBlock2DCrossAttn,
get_down_block,
)
from diffusers.models.unet_2d_condition import UNet2DConditionModel
import torch | 2,169 |
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
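        # conv_out starts zero-initialized (zero_module), so the conditioning embedding contributes
        # nothing at the start of training and control signals are injected only gradually
        # (the "zero convolution" trick from the ControlNet paper)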
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
| self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) | 1 | 2023-12-12 00:16:39+00:00 | 4k |
Chat-3D/Chat-3D-v2 | utils/config_utils.py | [
{
"identifier": "Config",
"path": "utils/config.py",
"snippet": "class Config(object):\n \"\"\"config\"\"\"\n\n @classmethod\n def pretty_text(cls, cfg: dict, indent=2) -> str:\n \"\"\"format dict to a string\n\n Args:\n cfg (EasyDict): the params.\n\n Returns: The string to display.\n\n \"\"\"\n msg = \"{\\n\"\n for i, (k, v) in enumerate(cfg.items()):\n if isinstance(v, dict):\n v = cls.pretty_text(v, indent + 4)\n spaces = \" \" * indent\n msg += spaces + \"{}: {}\".format(k, v)\n if i == len(cfg) - 1:\n msg += \" }\"\n else:\n msg += \"\\n\"\n return msg\n\n @classmethod\n def dump(cls, cfg, savepath=None):\n \"\"\"dump cfg to `json` file.\n\n Args:\n cfg (dict): The dict to dump.\n savepath (str): The filepath to save the dumped dict.\n\n Returns: TODO\n\n \"\"\"\n if savepath is None:\n savepath = osp.join(cfg.WORKSPACE, \"config.json\")\n json.dump(cfg, open(savepath, \"w\"), indent=2)\n\n @classmethod\n def get_config(cls, default_config: dict = None):\n \"\"\"get a `Config` instance.\n\n Args:\n default_config (dict): The default config. `default_config` will be overrided\n by config file `--cfg`, `--cfg` will be overrided by commandline args.\n\n Returns: an EasyDict.\n \"\"\"\n global cfg\n if cfg is not None:\n return cfg\n\n # define arg parser.\n parser = argparse.ArgumentParser()\n # parser.add_argument(\"--cfg\", help=\"load configs from yaml file\", default=\"\", type=str)\n parser.add_argument(\n \"config_file\", help=\"the configuration file to load. support: .yaml, .json, .py\"\n )\n parser.add_argument(\n \"opts\",\n default=None,\n nargs=\"*\",\n help=\"overrided configs. List. Format: 'key1 name1 key2 name2'\",\n )\n args = parser.parse_args()\n\n cfg = EasyDict(BASE_CONFIG)\n if osp.isfile(args.config_file):\n cfg_from_file = cls.from_file(args.config_file)\n cfg = merge_a_into_b(cfg_from_file, cfg)\n cfg = cls.merge_list(cfg, args.opts)\n cfg = eval_dict_leaf(cfg)\n\n # update some keys to make them show at the last\n for k in BASE_CONFIG:\n cfg[k] = cfg.pop(k)\n return cfg\n\n @classmethod\n def from_file(cls, filepath: str) -> EasyDict:\n \"\"\"Build config from file. 
Supported filetypes: `.py`,`.yaml`,`.json`.\n\n Args:\n filepath (str): The config file path.\n\n Returns: TODO\n\n \"\"\"\n filepath = osp.abspath(osp.expanduser(filepath))\n if not osp.isfile(filepath):\n raise IOError(f\"File does not exist: {filepath}\")\n if filepath.endswith(\".py\"):\n with tempfile.TemporaryDirectory() as temp_config_dir:\n\n shutil.copytree(osp.dirname(filepath), osp.join(temp_config_dir, \"tmp_config\"))\n sys.path.insert(0, temp_config_dir)\n mod = import_module(\"tmp_config.\" + osp.splitext(osp.basename(filepath))[0])\n # mod = import_module(temp_module_name)\n sys.path.pop(0)\n cfg_dict = {\n name: value\n for name, value in mod.__dict__.items()\n if not name.startswith(\"__\")\n }\n for k in list(sys.modules.keys()):\n if \"tmp_config\" in k:\n del sys.modules[k]\n elif filepath.endswith((\".yml\", \".yaml\")):\n cfg_dict = yaml.load(open(filepath, \"r\"), Loader=yaml.Loader)\n elif filepath.endswith(\".json\"):\n cfg_dict = json.load(open(filepath, \"r\"))\n else:\n raise IOError(\"Only py/yml/yaml/json type are supported now!\")\n\n cfg_text = filepath + \"\\n\"\n with open(filepath, \"r\") as f:\n cfg_text += f.read()\n\n if BASE_KEY in cfg_dict: # load configs in `BASE_KEY`\n cfg_dir = osp.dirname(filepath)\n base_filename = cfg_dict.pop(BASE_KEY)\n base_filename = (\n base_filename if isinstance(base_filename, list) else [base_filename]\n )\n\n cfg_dict_list = list()\n for f in base_filename:\n _cfg_dict = Config.from_file(osp.join(cfg_dir, f))\n cfg_dict_list.append(_cfg_dict)\n\n base_cfg_dict = dict()\n for c in cfg_dict_list:\n if len(base_cfg_dict.keys() & c.keys()) > 0:\n raise KeyError(\"Duplicate key is not allowed among bases\")\n base_cfg_dict.update(c)\n\n cfg_dict = merge_a_into_b(cfg_dict, base_cfg_dict)\n\n return EasyDict(cfg_dict)\n\n @classmethod\n def merge_list(cls, cfg, opts: list):\n \"\"\"merge commandline opts.\n\n Args:\n cfg: (dict): The config to be merged.\n opts (list): The list to merge. Format: [key1, name1, key2, name2,...].\n The keys can be nested. For example, [\"a.b\", v] will be considered\n as `dict(a=dict(b=v))`.\n\n Returns: dict.\n\n \"\"\"\n assert len(opts) % 2 == 0, f\"length of opts must be even. Got: {opts}\"\n for _i in range(0, len(opts), 2):\n full_k, v = opts[_i], opts[_i + 1]\n keys = full_k.split(\".\")\n sub_d = cfg\n for i, k in enumerate(keys):\n if not hasattr(sub_d, k):\n raise ValueError(f\"The key {k} not exist in the config. Full key:{full_k}\")\n if i != len(keys) - 1:\n sub_d = sub_d[k]\n else:\n sub_d[k] = v\n return cfg"
},
{
"identifier": "init_distributed_mode",
"path": "utils/distributed.py",
"snippet": "def init_distributed_mode(args):\n if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n # job started by torch.distributed.launch\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.gpu = int(os.environ['LOCAL_RANK'])\n elif 'SLURM_PROCID' in os.environ:\n # local rank on the current node / global rank\n local_rank = int(os.environ['SLURM_LOCALID'])\n global_rank = int(os.environ['SLURM_PROCID'])\n # number of processes / GPUs per node\n world_size = int(os.environ[\"SLURM_NNODES\"]) * \\\n int(os.environ[\"SLURM_TASKS_PER_NODE\"][0])\n\n print(world_size)\n\n args.rank = global_rank\n args.gpu = local_rank\n args.world_size = world_size\n else:\n logger.info('Not using distributed mode')\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = 'nccl'\n\n if \"tcp\" in args.dist_url: # in slurm, multiple program runs in a single node\n dist_port = int(args.dist_url.split(\":\")[-1])\n while is_port_in_use(dist_port):\n dist_port += 10\n args.dist_url = \":\".join(args.dist_url.split(\":\")[:-1] + [str(dist_port)])\n\n logger.info('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url))\n if \"SLURM_JOB_ID\" in os.environ:\n logger.info(f\"SLURM_JOB_ID {os.environ['SLURM_JOB_ID']}\")\n torch.distributed.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)"
},
{
"identifier": "is_main_process",
"path": "utils/distributed.py",
"snippet": "def is_main_process():\n return get_rank() == 0"
},
{
"identifier": "setup_logger",
"path": "utils/logger.py",
"snippet": "def setup_logger(\n output: str = None,\n color: bool = True,\n name: str = \"mmf\",\n disable: bool = False,\n clear_handlers=True,\n *args,\n **kwargs,\n):\n \"\"\"\n Initialize the MMF logger and set its verbosity level to \"INFO\".\n Outside libraries shouldn't call this in case they have set there\n own logging handlers and setup. If they do, and don't want to\n clear handlers, pass clear_handlers options.\n The initial version of this function was taken from D2 and adapted\n for MMF.\n Args:\n output (str): a file name or a directory to save log.\n If ends with \".txt\" or \".log\", assumed to be a file name.\n Default: Saved to file <save_dir/logs/log_[timestamp].txt>\n color (bool): If false, won't log colored logs. Default: true\n name (str): the root module name of this logger. Defaults to \"mmf\".\n disable: do not use\n clear_handlers (bool): If false, won't clear existing handlers.\n Returns:\n logging.Logger: a logger\n \"\"\"\n if disable:\n return None\n logger = logging.getLogger(name)\n logger.propagate = False\n\n logging.captureWarnings(True)\n warnings_logger = logging.getLogger(\"py.warnings\")\n\n plain_formatter = logging.Formatter(\n \"%(asctime)s | %(levelname)s | %(name)s : %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n\n distributed_rank = get_rank()\n handlers = []\n\n logging_level = logging.INFO\n # logging_level = logging.DEBUG\n\n if distributed_rank == 0:\n logger.setLevel(logging_level)\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging_level)\n if color:\n formatter = ColorfulFormatter(\n colored(\"%(asctime)s | %(name)s: \", \"green\") + \"%(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n else:\n formatter = plain_formatter\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n warnings_logger.addHandler(ch)\n handlers.append(ch)\n\n # file logging: all workers\n if output is None:\n output = setup_output_folder()\n\n if output is not None:\n if output.endswith(\".txt\") or output.endswith(\".log\"):\n filename = output\n else:\n filename = os.path.join(output, \"train.log\")\n if distributed_rank > 0:\n filename = filename + f\".rank{distributed_rank}\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n fh = logging.StreamHandler(_cached_log_stream(filename))\n fh.setLevel(logging_level)\n fh.setFormatter(plain_formatter)\n logger.addHandler(fh)\n warnings_logger.addHandler(fh)\n handlers.append(fh)\n\n # Slurm/FB output, only log the main process\n # save_dir = get_mmf_env(key=\"save_dir\")\n if \"train.log\" not in filename and distributed_rank == 0:\n filename = os.path.join(output, \"train.log\")\n sh = logging.StreamHandler(_cached_log_stream(filename))\n sh.setLevel(logging_level)\n sh.setFormatter(plain_formatter)\n logger.addHandler(sh)\n warnings_logger.addHandler(sh)\n handlers.append(sh)\n\n logger.info(f\"Logging to: {filename}\")\n\n # Remove existing handlers to add MMF specific handlers\n if clear_handlers:\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n # Now, add our handlers.\n logging.basicConfig(level=logging_level, handlers=handlers)\n\n return logger"
}
] | import logging
import os
import sys
from os.path import dirname, join
from utils.config import Config
from utils.distributed import init_distributed_mode, is_main_process
from utils.logger import setup_logger | 3,272 |
logger = logging.getLogger(__name__)
def setup_config():
"""Conbine yaml config and command line config with OmegaConf.
Also converts types, e.g., `'None'` (str) --> `None` (None)
"""
config = Config.get_config()
if config.debug:
config.wandb.enable = False
return config
def setup_evaluate_config(config):
"""setup evaluation default settings, e.g., disable wandb"""
assert config.evaluate
config.wandb.enable = False
if config.output_dir is None:
config.output_dir = join(dirname(config.pretrained_path), "eval")
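        # evaluation outputs default to a sibling "eval" folder next to the checkpoint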
return config
def setup_output_dir(output_dir, excludes=["code"]):
"""ensure not overwritting an exisiting/non-empty output dir"""
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=False)
else:
existing_dirs_files = os.listdir(output_dir) # list
remaining = set(existing_dirs_files) - set(excludes)
remaining = [e for e in remaining if "slurm" not in e]
remaining = [e for e in remaining if ".out" not in e]
# assert len(remaining) == 0, f"remaining dirs or files: {remaining}"
logger.warn(f"remaining dirs or files: {remaining}")
def setup_main():
"""
Setup config, logger, output_dir, etc.
Shared for pretrain and all downstream tasks.
"""
config = setup_config()
if hasattr(config, "evaluate") and config.evaluate:
config = setup_evaluate_config(config)
|
logger = logging.getLogger(__name__)
def setup_config():
"""Conbine yaml config and command line config with OmegaConf.
Also converts types, e.g., `'None'` (str) --> `None` (None)
"""
config = Config.get_config()
if config.debug:
config.wandb.enable = False
return config
def setup_evaluate_config(config):
"""setup evaluation default settings, e.g., disable wandb"""
assert config.evaluate
config.wandb.enable = False
if config.output_dir is None:
config.output_dir = join(dirname(config.pretrained_path), "eval")
return config
def setup_output_dir(output_dir, excludes=["code"]):
"""ensure not overwritting an exisiting/non-empty output dir"""
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=False)
else:
existing_dirs_files = os.listdir(output_dir) # list
remaining = set(existing_dirs_files) - set(excludes)
remaining = [e for e in remaining if "slurm" not in e]
remaining = [e for e in remaining if ".out" not in e]
# assert len(remaining) == 0, f"remaining dirs or files: {remaining}"
logger.warn(f"remaining dirs or files: {remaining}")
def setup_main():
"""
Setup config, logger, output_dir, etc.
Shared for pretrain and all downstream tasks.
"""
config = setup_config()
if hasattr(config, "evaluate") and config.evaluate:
config = setup_evaluate_config(config) | init_distributed_mode(config) | 1 | 2023-12-11 14:39:58+00:00 | 4k |
SqueezeBits/owlite | owlite/options/options_dict.py | [
{
"identifier": "log",
"path": "owlite/logger.py",
"snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):"
},
{
"identifier": "generic_isinstance",
"path": "owlite/options/generic_type_checking.py",
"snippet": "def generic_isinstance(obj: Any, type_hint: Union[type, tuple[type]]) -> bool:\n \"\"\"An extension for the builtin function `isinstance` for type hint checking.\"\"\"\n if isinstance(type_hint, tuple):\n return any(generic_isinstance(obj, t) for t in type_hint)\n\n origin_type = getattr(type_hint, \"__origin__\", None)\n if origin_type is None:\n return isinstance(obj, type_hint)\n value_types = get_args(type_hint)\n if origin_type is dict:\n value_type = value_types[0]\n return isinstance(obj, origin_type) and all(\n generic_isinstance(x, value_type) for x in obj.values()\n )\n if origin_type in (tuple, list):\n value_type = value_types[0]\n return isinstance(obj, origin_type) and all(\n generic_isinstance(x, value_type) for x in obj\n )\n if origin_type is Union:\n return generic_isinstance(obj, value_types)\n raise NotImplementedError(f\"generic_isinstance for {type_hint} is not implemented.\")"
},
{
"identifier": "load_json_or_yaml",
"path": "owlite/options/load.py",
"snippet": "def load_json_or_yaml(path_or_string_literal: str) -> Union[dict, CfgNode]:\n \"\"\"Loads either json or CfgNode from the given string.\n\n Args:\n path_or_string_literal (str): a string object containing either\n * the path to a \"*.json\" or \"*.yaml\" file; or\n * the content of the file in string\n\n Returns:\n Union[dict, CfgNode]: the loaded object\n \"\"\"\n try:\n if os.path.isfile(path_or_string_literal):\n with open(path_or_string_literal, encoding=\"utf-8\") as f:\n data = json.load(f)\n else:\n data = json.loads(path_or_string_literal)\n except json.JSONDecodeError:\n if os.path.isfile(path_or_string_literal):\n with open(path_or_string_literal, encoding=\"utf-8\") as f:\n data = CfgNode.load_cfg(f)\n else:\n data = CfgNode.load_cfg(path_or_string_literal)\n\n if not isinstance(data, (dict, CfgNode)):\n raise TypeError(\n f\"Expected either dict or CfgNode, but {data} of type {type(data)} is loaded.\"\n )\n\n return data"
},
{
"identifier": "OptionsMixin",
"path": "owlite/options/options_mixin.py",
"snippet": "class OptionsMixin:\n \"\"\"The Mixin-style base class for adding type-checking feature and custom value-checking feature.\"\"\"\n\n @classmethod\n def annotations(cls: type) -> dict[str, type]:\n \"\"\"Finds the type inferred from the type hint of each member variable from a given class\n\n Args:\n cls (type): a class\n\n Returns:\n dict[str, type]: the type annotations\n \"\"\"\n a = {}\n if issubclass(cls.__base__, OptionsMixin):\n a.update(cls.__base__.annotations())\n a.update(get_type_hints(cls))\n return a\n\n @classmethod\n def load(cls, d: Union[CfgNode, dict, str]) -> Self:\n \"\"\"Loads the OptionsMixin subclass instance from the given data\n\n Args:\n d (Union[CfgNode, dict, str]): one of\n * a CfgNode or dict object; or\n * a string object containing the representation of such object; or\n * the path to a file containing such representation.\n\n Raises:\n KeyError: if a required key is not found\n\n Returns:\n Self: an OptionsMixin object\n \"\"\"\n if isinstance(d, str):\n d = load_json_or_yaml(d)\n if not isinstance(d, dict):\n raise ValueError(f\"{cls} cannot load invalid value {d}\")\n\n kwargs = {}\n if not is_dataclass(cls) or not issubclass(cls, OptionsMixin):\n log.error(f\"A subclass of OptionsMixin must be decorated with dataclass, but {cls} is not\")\n raise TypeError(f\"{cls} must be decorated with dataclass\")\n\n for field in fields(cls):\n required = field.default is MISSING and field.default_factory is MISSING\n default_value = None\n if field.default is not MISSING:\n default_value = field.default\n elif field.default_factory is not MISSING:\n default_value = field.default_factory()\n\n if required and field.name not in d:\n raise KeyError(f\"Missing required key {field.name} in dictionary {d}\")\n\n value = d.get(field.name, default_value)\n kwargs[field.name] = cls._deserialize(value, field.type)\n return cls(**kwargs) # type: ignore[return-value]\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Check the type of the new value.\n\n Args:\n name (str): the name of a property\n value (Any): the new value for the property\n\n Raises:\n KeyError: if `name` is not a pre-defined attribute.\n ValueError: if a method named `f\"check_{name}\"` is found,\n `self.check_{name}(value)` is evaluated, and if the\n result is False, raises ValueError with message including the\n method's doc string.\n \"\"\"\n cls = self.__class__\n cls_name = cls.__name__\n annotations = cls.annotations()\n if name not in annotations:\n raise KeyError(f\"No such property in {cls_name}: {name}\")\n field_type = annotations[name]\n value = self._deserialize(value, field_type)\n if not generic_isinstance(value, field_type):\n raise ValueError(\n f\"Expected a value of type {field_type}, \"\n f\"but received {value} of type {type(value)} for {name} in {cls_name}\"\n )\n self._check(name, value)\n super().__setattr__(name, value)\n\n def _check(self, attr: str, new_value: Any) -> None:\n checker = getattr(self, f\"check_{attr}\", None)\n if checker and inspect.ismethod(checker) and not checker(new_value):\n msg = f\"Invalid value {new_value} given to {attr} of {self.__class__.__name__}\"\n doc_string = getattr(checker, \"__doc__\", None)\n if doc_string is not None:\n msg += f\":\\n{doc_string}\"\n raise ValueError(msg)\n\n @property\n def config(self) -> CfgNode:\n \"\"\"CfgNode representation for this object, which you can use it for writing it to a yaml file by\n ```python\n with open(\"config.yaml\", \"w\") as f:\n f.write(options.config.dump())\n ```\n \"\"\"\n 
return CfgNode(init_dict=self.json)\n\n @property\n def json(self) -> dict:\n \"\"\"Builtin dictionary representation for this object, which you can use it for writing it to a json file\n with the `json.dump` or `json.dumps` function\"\"\"\n d = {}\n for field in fields(type(self)): # type: ignore[arg-type]\n field_value = getattr(self, field.name)\n d[field.name] = self._serialize(field_value)\n return d\n\n @classmethod\n def _deserialize(cls, x: object, t: type) -> object:\n type_error_message = (\n \"A field type of OptionsMixin must\"\n \"\\ni) be one of bool, int, float or str; or\"\n \"\\nii) be a subclass of enum.Enum; or\"\n \"\\niii) be a subclass of OptionsMixin; or\"\n \"\\niv) be of the form Optional[T] where T satisfies one of i), ii) or iii)\"\n \"\\nv) be of the form list[T] where T satisfies one of i), ii) or iii)\"\n )\n type_error_desc = f\"Unsupported field type {t} found {cls}.\"\n if generic_isinstance(x, t):\n return x\n if is_optional(t):\n return None if x is None else cls._deserialize(x, unwrap_optional(t))\n origin = get_origin(t)\n if origin is list and isinstance(x, list):\n element_type = get_args(t)[0]\n return [cls._deserialize(item, element_type) for item in x]\n if origin is not None:\n log.error(type_error_message)\n raise TypeError(type_error_desc)\n if issubclass(t, Enum):\n return t[x] if isinstance(x, str) else t(x)\n if issubclass(t, OptionsMixin):\n return t.load(x)\n if t not in (int, float, str, bool):\n log.error(type_error_message)\n raise TypeError(type_error_desc)\n return t(x) # type: ignore[call-arg]\n\n @classmethod\n def _serialize(cls, x: object) -> object:\n if isinstance(x, OptionsMixin):\n return x.json\n if isinstance(x, Enum):\n return x.name\n if isinstance(x, dict):\n return {key: cls._serialize(value) for key, value in x.items()}\n if isinstance(x, list):\n return [cls._serialize(value) for value in x]\n return x"
}
] | from dataclasses import fields, is_dataclass
from types import NoneType
from typing import Any, Union, get_args, get_origin
from yacs.config import CfgNode
from ..logger import log
from .generic_type_checking import generic_isinstance
from .load import load_json_or_yaml
from .options_mixin import OptionsMixin | 3,361 | """Options required for configuring torch.fx.GraphModule"""
class OptionsDict(dict, OptionsMixin):
"""A simple extension of python `dict` to hold Options as values"""
ValueType: type
def __init__(self, d: Union[CfgNode, dict, str, NoneType] = None):
# Required for checking if ValueType is valid
_ = type(self).value_types()
super(dict, self).__init__()
if d is None:
return
for k, v in type(self).load(d).items():
self[k] = v
def update(self, d: dict):
for key, value in d.items():
self[key] = value
@classmethod
def value_types(cls) -> tuple[type[OptionsMixin]]:
"""Allowed value types of this class in tuple"""
if hasattr(cls, "_value_types"):
# prevent duplicate type-checking
return cls._value_types
if not hasattr(cls, "ValueType"):
log.error(
"A subclass of OptionsDict requires a static type (or type union) `ValueType` "
"indicating the possible value types of the subclass"
)
raise AttributeError(f"ValueType for {cls} is not defined")
origin = get_origin(cls.ValueType)
full_type_error_message = (
f"The type (union) ValueType of {cls} must be one of the followings:"
"\ni) a subclass of OptionsMixin decorated with dataclass; or"
"\nii) the list, tuple, Optional or Union of type(s) satisfying i); or"
"\niii) a subclass of OptionsDict,\n"
f"but {cls.__name__}.ValueType={cls.ValueType} is given."
)
type_error_message = f"Invalid ValueType {cls.ValueType} defined for {cls}"
if origin in (Union, list, tuple):
args = get_args(cls.ValueType)
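            # e.g. Union[A, B] -> (A, B), Optional[A] -> (A, NoneType), list[A] / tuple[A] -> (A,)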
if not all((issubclass(c, OptionsMixin) and is_dataclass(c)) or c is NoneType for c in args):
log.error(full_type_error_message)
raise TypeError(type_error_message)
elif origin is None:
if not (
(issubclass(cls.ValueType, OptionsMixin) and is_dataclass(cls.ValueType))
or issubclass(cls.ValueType, OptionsDict)
):
log.error(full_type_error_message)
raise TypeError(type_error_message)
args = (cls.ValueType,)
else:
raise TypeError(f"The type hint origin {origin} is not supported - {cls}.ValueType = {cls.ValueType}")
cls._value_types = args
return args
@classmethod
def load(cls, d: Union[dict, list, str, tuple, NoneType]) -> Any:
options_dict = cls()
value_types = cls.value_types()
origin = get_origin(cls.ValueType)
def load(name: str, data):
if generic_isinstance(data, cls.ValueType):
options_dict[name] = data
return
if origin in (Union, None):
if data is None and NoneType in value_types:
options_dict[name] = None
return
if not isinstance(data, dict):
raise TypeError(f"Expected dict but got {data}")
subnode_key_set = set(data.keys())
for option_type in value_types:
if issubclass(option_type, OptionsDict):
options_dict[name] = option_type.load(data)
break
if subnode_key_set == {field.name for field in fields(option_type)}:
options_dict[name] = option_type.load(data)
break
else:
raise ValueError(
f"Failed to parse config for node {name}: "
f"no matching options class for {data}. ({cls.__name__}.ValueType = {cls.ValueType})"
)
return
if origin in (list, tuple) and isinstance(data, (tuple, list)):
if len(value_types) != 1:
raise TypeError(
"When ValueType of a subclass of OptionsDict is either list or tuple, "
f"its element type must be specified, but {cls.__name__}.ValueType = {cls.ValueType}"
)
option_type = value_types[0]
options_dict[name] = origin(option_type.load(item) for item in data)
return
raise ValueError(f"{cls} cannot load the invalid value {data} at key={name}")
if isinstance(d, str):
| """Options required for configuring torch.fx.GraphModule"""
class OptionsDict(dict, OptionsMixin):
"""A simple extension of python `dict` to hold Options as values"""
ValueType: type
def __init__(self, d: Union[CfgNode, dict, str, NoneType] = None):
# Required for checking if ValueType is valid
_ = type(self).value_types()
super(dict, self).__init__()
if d is None:
return
for k, v in type(self).load(d).items():
self[k] = v
def update(self, d: dict):
for key, value in d.items():
self[key] = value
@classmethod
def value_types(cls) -> tuple[type[OptionsMixin]]:
"""Allowed value types of this class in tuple"""
if hasattr(cls, "_value_types"):
# prevent duplicate type-checking
return cls._value_types
if not hasattr(cls, "ValueType"):
log.error(
"A subclass of OptionsDict requires a static type (or type union) `ValueType` "
"indicating the possible value types of the subclass"
)
raise AttributeError(f"ValueType for {cls} is not defined")
origin = get_origin(cls.ValueType)
full_type_error_message = (
f"The type (union) ValueType of {cls} must be one of the followings:"
"\ni) a subclass of OptionsMixin decorated with dataclass; or"
"\nii) the list, tuple, Optional or Union of type(s) satisfying i); or"
"\niii) a subclass of OptionsDict,\n"
f"but {cls.__name__}.ValueType={cls.ValueType} is given."
)
type_error_message = f"Invalid ValueType {cls.ValueType} defined for {cls}"
if origin in (Union, list, tuple):
args = get_args(cls.ValueType)
if not all((issubclass(c, OptionsMixin) and is_dataclass(c)) or c is NoneType for c in args):
log.error(full_type_error_message)
raise TypeError(type_error_message)
elif origin is None:
if not (
(issubclass(cls.ValueType, OptionsMixin) and is_dataclass(cls.ValueType))
or issubclass(cls.ValueType, OptionsDict)
):
log.error(full_type_error_message)
raise TypeError(type_error_message)
args = (cls.ValueType,)
else:
raise TypeError(f"The type hint origin {origin} is not supported - {cls}.ValueType = {cls.ValueType}")
cls._value_types = args
return args
@classmethod
def load(cls, d: Union[dict, list, str, tuple, NoneType]) -> Any:
options_dict = cls()
value_types = cls.value_types()
origin = get_origin(cls.ValueType)
def load(name: str, data):
if generic_isinstance(data, cls.ValueType):
options_dict[name] = data
return
if origin in (Union, None):
if data is None and NoneType in value_types:
options_dict[name] = None
return
if not isinstance(data, dict):
raise TypeError(f"Expected dict but got {data}")
subnode_key_set = set(data.keys())
for option_type in value_types:
if issubclass(option_type, OptionsDict):
options_dict[name] = option_type.load(data)
break
if subnode_key_set == {field.name for field in fields(option_type)}:
options_dict[name] = option_type.load(data)
break
else:
raise ValueError(
f"Failed to parse config for node {name}: "
f"no matching options class for {data}. ({cls.__name__}.ValueType = {cls.ValueType})"
)
return
if origin in (list, tuple) and isinstance(data, (tuple, list)):
if len(value_types) != 1:
raise TypeError(
"When ValueType of a subclass of OptionsDict is either list or tuple, "
f"its element type must be specified, but {cls.__name__}.ValueType = {cls.ValueType}"
)
option_type = value_types[0]
options_dict[name] = origin(option_type.load(item) for item in data)
return
raise ValueError(f"{cls} cannot load the invalid value {data} at key={name}")
if isinstance(d, str): | d = load_json_or_yaml(d) | 2 | 2023-12-08 06:41:50+00:00 | 4k |
ximinng/PyTorch-SVGRender | pytorch_svgrender/libs/metric/piq/perceptual.py | [
{
"identifier": "_validate_input",
"path": "pytorch_svgrender/libs/metric/piq/utils/common.py",
"snippet": "def _validate_input(\n tensors: List[torch.Tensor],\n dim_range: Tuple[int, int] = (0, -1),\n data_range: Tuple[float, float] = (0., -1.),\n # size_dim_range: Tuple[float, float] = (0., -1.),\n size_range: Optional[Tuple[int, int]] = None,\n) -> None:\n r\"\"\"Check that input(-s) satisfies the requirements\n Args:\n tensors: Tensors to check\n dim_range: Allowed number of dimensions. (min, max)\n data_range: Allowed range of values in tensors. (min, max)\n size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1)\n \"\"\"\n\n if not __debug__:\n return\n\n x = tensors[0]\n\n for t in tensors:\n assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}'\n assert t.device == x.device, f'Expected tensors to be on {x.device}, got {t.device}'\n\n if size_range is None:\n assert t.size() == x.size(), f'Expected tensors with same size, got {t.size()} and {x.size()}'\n else:\n assert t.size()[size_range[0]: size_range[1]] == x.size()[size_range[0]: size_range[1]], \\\n f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}'\n\n if dim_range[0] == dim_range[1]:\n assert t.dim() == dim_range[0], f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}'\n elif dim_range[0] < dim_range[1]:\n assert dim_range[0] <= t.dim() <= dim_range[1], \\\n f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}'\n\n if data_range[0] < data_range[1]:\n assert data_range[0] <= t.min(), \\\n f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}'\n assert t.max() <= data_range[1], \\\n f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}'"
},
{
"identifier": "_reduce",
"path": "pytorch_svgrender/libs/metric/piq/utils/common.py",
"snippet": "def _reduce(x: torch.Tensor, reduction: str = 'mean') -> torch.Tensor:\n r\"\"\"Reduce input in batch dimension if needed.\n\n Args:\n x: Tensor with shape (N, *).\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``\n \"\"\"\n if reduction == 'none':\n return x\n elif reduction == 'mean':\n return x.mean(dim=0)\n elif reduction == 'sum':\n return x.sum(dim=0)\n else:\n raise ValueError(\"Unknown reduction. Expected one of {'none', 'mean', 'sum'}\")"
},
{
"identifier": "similarity_map",
"path": "pytorch_svgrender/libs/metric/piq/functional/base.py",
"snippet": "def similarity_map(map_x: torch.Tensor, map_y: torch.Tensor, constant: float, alpha: float = 0.0) -> torch.Tensor:\n r\"\"\" Compute similarity_map between two tensors using Dice-like equation.\n\n Args:\n map_x: Tensor with map to be compared\n map_y: Tensor with map to be compared\n constant: Used for numerical stability\n alpha: Masking coefficient. Subtracts - `alpha` * map_x * map_y from denominator and nominator\n \"\"\"\n return (2.0 * map_x * map_y - alpha * map_x * map_y + constant) / \\\n (map_x ** 2 + map_y ** 2 - alpha * map_x * map_y + constant)"
},
{
"identifier": "L2Pool2d",
"path": "pytorch_svgrender/libs/metric/piq/functional/layers.py",
"snippet": "class L2Pool2d(torch.nn.Module):\n r\"\"\"Applies L2 pooling with Hann window of size 3x3\n Args:\n x: Tensor with shape (N, C, H, W)\"\"\"\n EPS = 1e-12\n\n def __init__(self, kernel_size: int = 3, stride: int = 2, padding=1) -> None:\n super().__init__()\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n\n self.kernel: Optional[torch.Tensor] = None\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.kernel is None:\n C = x.size(1)\n self.kernel = hann_filter(self.kernel_size).repeat((C, 1, 1, 1)).to(x)\n\n out = torch.nn.functional.conv2d(\n x ** 2, self.kernel,\n stride=self.stride,\n padding=self.padding,\n groups=x.shape[1]\n )\n return (out + self.EPS).sqrt()"
}
] | from typing import List, Union, Collection
from torch.nn.modules.loss import _Loss
from torchvision.models import vgg16, vgg19, VGG16_Weights, VGG19_Weights
from .utils import _validate_input, _reduce
from .functional import similarity_map, L2Pool2d
import torch
import torch.nn as nn | 3,409 | "pool5": '36',
}
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
# Constant used in feature normalization to avoid zero division
EPS = 1e-10
class ContentLoss(_Loss):
r"""Creates Content loss that can be used for image style transfer or as a measure for image to image tasks.
Uses pretrained VGG models from torchvision.
Expects input to be in range [0, 1] or normalized with ImageNet statistics into range [-1, 1]
Args:
feature_extractor: Model to extract features or model name: ``'vgg16'`` | ``'vgg19'``.
layers: List of strings with layer names. Default: ``'relu3_3'``
        weights: List of float weights to balance different layers
replace_pooling: Flag to replace MaxPooling layer with AveragePooling. See references for details.
distance: Method to compute distance between features: ``'mse'`` | ``'mae'``.
reduction: Specifies the reduction type:
``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``
mean: List of float values used for data standardization. Default: ImageNet mean.
If there is no need to normalize data, use [0., 0., 0.].
std: List of float values used for data standardization. Default: ImageNet std.
If there is no need to normalize data, use [1., 1., 1.].
normalize_features: If true, unit-normalize each feature in channel dimension before scaling
and computing distance. See references for details.
Examples:
>>> loss = ContentLoss()
>>> x = torch.rand(3, 3, 256, 256, requires_grad=True)
>>> y = torch.rand(3, 3, 256, 256)
>>> output = loss(x, y)
>>> output.backward()
References:
Gatys, Leon and Ecker, Alexander and Bethge, Matthias (2016).
A Neural Algorithm of Artistic Style
Association for Research in Vision and Ophthalmology (ARVO)
https://arxiv.org/abs/1508.06576
Zhang, Richard and Isola, Phillip and Efros, et al. (2018)
The Unreasonable Effectiveness of Deep Features as a Perceptual Metric
IEEE/CVF Conference on Computer Vision and Pattern Recognition
https://arxiv.org/abs/1801.03924
"""
def __init__(self, feature_extractor: Union[str, torch.nn.Module] = "vgg16", layers: Collection[str] = ("relu3_3",),
weights: List[Union[float, torch.Tensor]] = [1.], replace_pooling: bool = False,
distance: str = "mse", reduction: str = "mean", mean: List[float] = IMAGENET_MEAN,
std: List[float] = IMAGENET_STD, normalize_features: bool = False,
allow_layers_weights_mismatch: bool = False) -> None:
assert allow_layers_weights_mismatch or len(layers) == len(weights), \
            f'Lengths of provided layers and weights mismatch ({len(weights)} weights and {len(layers)} layers), ' \
            f'which will cause incorrect results. Please provide a weight for each layer.'
super().__init__()
if callable(feature_extractor):
self.model = feature_extractor
self.layers = layers
else:
if feature_extractor == "vgg16":
# self.model = vgg16(pretrained=True, progress=False).features
self.model = vgg16(weights=VGG16_Weights.DEFAULT, progress=False).features
self.layers = [VGG16_LAYERS[l] for l in layers]
elif feature_extractor == "vgg19":
# self.model = vgg19(pretrained=True, progress=False).features
self.model = vgg19(weights=VGG19_Weights.DEFAULT, progress=False).features
self.layers = [VGG19_LAYERS[l] for l in layers]
else:
raise ValueError("Unknown feature extractor")
if replace_pooling:
self.model = self.replace_pooling(self.model)
# Disable gradients
for param in self.model.parameters():
param.requires_grad_(False)
self.distance = {
"mse": nn.MSELoss,
"mae": nn.L1Loss,
}[distance](reduction='none')
self.weights = [torch.tensor(w) if not isinstance(w, torch.Tensor) else w for w in weights]
mean = torch.tensor(mean)
std = torch.tensor(std)
self.mean = mean.view(1, -1, 1, 1)
self.std = std.view(1, -1, 1, 1)
self.normalize_features = normalize_features
self.reduction = reduction
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Computation of Content loss between feature representations of prediction :math:`x` and
target :math:`y` tensors.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Content loss between feature representations
"""
_validate_input([x, y], dim_range=(4, 4), data_range=(0, -1))
self.model.to(x)
x_features = self.get_features(x)
y_features = self.get_features(y)
distances = self.compute_distance(x_features, y_features)
# Scale distances, then average in spatial dimensions, then stack and sum in channels dimension
loss = torch.cat([(d * w.to(d)).mean(dim=[2, 3]) for d, w in zip(distances, self.weights)], dim=1).sum(dim=1)
| """
Implementation of Content loss, Style loss, LPIPS and DISTS metrics
References:
.. [1] Gatys, Leon and Ecker, Alexander and Bethge, Matthias
(2016). A Neural Algorithm of Artistic Style}
Association for Research in Vision and Ophthalmology (ARVO)
https://arxiv.org/abs/1508.06576
.. [2] Zhang, Richard and Isola, Phillip and Efros, et al.
(2018) The Unreasonable Effectiveness of Deep Features as a Perceptual Metric
2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition
https://arxiv.org/abs/1801.03924
"""
# Map VGG names to corresponding number in torchvision layer
VGG16_LAYERS = {
"conv1_1": '0', "relu1_1": '1',
"conv1_2": '2', "relu1_2": '3',
"pool1": '4',
"conv2_1": '5', "relu2_1": '6',
"conv2_2": '7', "relu2_2": '8',
"pool2": '9',
"conv3_1": '10', "relu3_1": '11',
"conv3_2": '12', "relu3_2": '13',
"conv3_3": '14', "relu3_3": '15',
"pool3": '16',
"conv4_1": '17', "relu4_1": '18',
"conv4_2": '19', "relu4_2": '20',
"conv4_3": '21', "relu4_3": '22',
"pool4": '23',
"conv5_1": '24', "relu5_1": '25',
"conv5_2": '26', "relu5_2": '27',
"conv5_3": '28', "relu5_3": '29',
"pool5": '30',
}
VGG19_LAYERS = {
"conv1_1": '0', "relu1_1": '1',
"conv1_2": '2', "relu1_2": '3',
"pool1": '4',
"conv2_1": '5', "relu2_1": '6',
"conv2_2": '7', "relu2_2": '8',
"pool2": '9',
"conv3_1": '10', "relu3_1": '11',
"conv3_2": '12', "relu3_2": '13',
"conv3_3": '14', "relu3_3": '15',
"conv3_4": '16', "relu3_4": '17',
"pool3": '18',
"conv4_1": '19', "relu4_1": '20',
"conv4_2": '21', "relu4_2": '22',
"conv4_3": '23', "relu4_3": '24',
"conv4_4": '25', "relu4_4": '26',
"pool4": '27',
"conv5_1": '28', "relu5_1": '29',
"conv5_2": '30', "relu5_2": '31',
"conv5_3": '32', "relu5_3": '33',
"conv5_4": '34', "relu5_4": '35',
"pool5": '36',
}
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
# Constant used in feature normalization to avoid zero division
EPS = 1e-10
class ContentLoss(_Loss):
r"""Creates Content loss that can be used for image style transfer or as a measure for image to image tasks.
Uses pretrained VGG models from torchvision.
Expects input to be in range [0, 1] or normalized with ImageNet statistics into range [-1, 1]
Args:
feature_extractor: Model to extract features or model name: ``'vgg16'`` | ``'vgg19'``.
layers: List of strings with layer names. Default: ``'relu3_3'``
        weights: List of float weights to balance different layers
replace_pooling: Flag to replace MaxPooling layer with AveragePooling. See references for details.
distance: Method to compute distance between features: ``'mse'`` | ``'mae'``.
reduction: Specifies the reduction type:
``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``
mean: List of float values used for data standardization. Default: ImageNet mean.
If there is no need to normalize data, use [0., 0., 0.].
std: List of float values used for data standardization. Default: ImageNet std.
If there is no need to normalize data, use [1., 1., 1.].
normalize_features: If true, unit-normalize each feature in channel dimension before scaling
and computing distance. See references for details.
Examples:
>>> loss = ContentLoss()
>>> x = torch.rand(3, 3, 256, 256, requires_grad=True)
>>> y = torch.rand(3, 3, 256, 256)
>>> output = loss(x, y)
>>> output.backward()
References:
Gatys, Leon and Ecker, Alexander and Bethge, Matthias (2016).
A Neural Algorithm of Artistic Style
Association for Research in Vision and Ophthalmology (ARVO)
https://arxiv.org/abs/1508.06576
Zhang, Richard and Isola, Phillip and Efros, et al. (2018)
The Unreasonable Effectiveness of Deep Features as a Perceptual Metric
IEEE/CVF Conference on Computer Vision and Pattern Recognition
https://arxiv.org/abs/1801.03924
"""
def __init__(self, feature_extractor: Union[str, torch.nn.Module] = "vgg16", layers: Collection[str] = ("relu3_3",),
weights: List[Union[float, torch.Tensor]] = [1.], replace_pooling: bool = False,
distance: str = "mse", reduction: str = "mean", mean: List[float] = IMAGENET_MEAN,
std: List[float] = IMAGENET_STD, normalize_features: bool = False,
allow_layers_weights_mismatch: bool = False) -> None:
assert allow_layers_weights_mismatch or len(layers) == len(weights), \
            f'Lengths of provided layers and weights mismatch ({len(weights)} weights and {len(layers)} layers), ' \
            f'which will cause incorrect results. Please provide a weight for each layer.'
super().__init__()
if callable(feature_extractor):
self.model = feature_extractor
self.layers = layers
else:
if feature_extractor == "vgg16":
# self.model = vgg16(pretrained=True, progress=False).features
self.model = vgg16(weights=VGG16_Weights.DEFAULT, progress=False).features
self.layers = [VGG16_LAYERS[l] for l in layers]
elif feature_extractor == "vgg19":
# self.model = vgg19(pretrained=True, progress=False).features
self.model = vgg19(weights=VGG19_Weights.DEFAULT, progress=False).features
self.layers = [VGG19_LAYERS[l] for l in layers]
else:
raise ValueError("Unknown feature extractor")
if replace_pooling:
self.model = self.replace_pooling(self.model)
# Disable gradients
for param in self.model.parameters():
param.requires_grad_(False)
self.distance = {
"mse": nn.MSELoss,
"mae": nn.L1Loss,
}[distance](reduction='none')
self.weights = [torch.tensor(w) if not isinstance(w, torch.Tensor) else w for w in weights]
mean = torch.tensor(mean)
std = torch.tensor(std)
self.mean = mean.view(1, -1, 1, 1)
self.std = std.view(1, -1, 1, 1)
self.normalize_features = normalize_features
self.reduction = reduction
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Computation of Content loss between feature representations of prediction :math:`x` and
target :math:`y` tensors.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Content loss between feature representations
"""
_validate_input([x, y], dim_range=(4, 4), data_range=(0, -1))
self.model.to(x)
x_features = self.get_features(x)
y_features = self.get_features(y)
distances = self.compute_distance(x_features, y_features)
# Scale distances, then average in spatial dimensions, then stack and sum in channels dimension
loss = torch.cat([(d * w.to(d)).mean(dim=[2, 3]) for d, w in zip(distances, self.weights)], dim=1).sum(dim=1)
| return _reduce(loss, self.reduction) | 1 | 2023-12-13 08:18:01+00:00 | 4k |
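As a minimal standalone sketch of the per-layer weighting, spatial averaging and batch reduction done at the end of ContentLoss.forward above (the tensors, shapes and weights below are made up; this is not the piq source):

import torch

def reduce_batch(x, reduction='mean'):
    # mirrors the _reduce helper: keep, average or sum over the batch dimension
    return {'none': x, 'mean': x.mean(dim=0), 'sum': x.sum(dim=0)}[reduction]

# fake per-layer distance maps for a batch of 3 images (channel/spatial sizes differ per layer)
distances = [torch.rand(3, 64, 32, 32), torch.rand(3, 128, 16, 16)]
weights = [torch.tensor(1.0), torch.tensor(0.5)]

# scale each layer, average over H and W, concatenate along channels, then sum channels
per_sample = torch.cat(
    [(d * w).mean(dim=[2, 3]) for d, w in zip(distances, weights)], dim=1
).sum(dim=1)                               # shape (3,): one value per image
loss = reduce_batch(per_sample, 'mean')    # scalar, as with reduction='mean'
print(per_sample.shape, loss.item())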
lyhisme/DeST | libs/models/DeST_linearformer.py | [
{
"identifier": "SingleStageTCN",
"path": "libs/models/tcn.py",
"snippet": "class SingleStageTCN(nn.Module):\n def __init__(\n self,\n in_channel: int,\n n_features: int,\n n_classes: int,\n n_layers: int,\n **kwargs: Any\n ) -> None:\n super().__init__()\n self.conv_in = nn.Conv1d(in_channel, n_features, 1)\n layers = [\n DilatedResidualLayer(2 ** i, n_features, n_features)\n for i in range(n_layers)\n ]\n self.layers = nn.ModuleList(layers)\n self.conv_out = nn.Conv1d(n_features, n_classes, 1)\n\n def forward(self, x: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n out = self.conv_in(x)\n for layer in self.layers:\n out = layer(out, mask)\n out = self.conv_out(out)\n return out * mask[:, 0:1, :]"
},
{
"identifier": "MultiScale_GraphConv",
"path": "libs/models/SP.py",
"snippet": "class MultiScale_GraphConv(nn.Module):\n def __init__(self,\n num_scales, # 13\n in_channels,\n out_channels,\n dataset,\n disentangled_agg=True,\n use_mask=True,\n dropout=0,\n activation='relu'):\n super().__init__()\n\n self.graph = Graph(labeling_mode='spatial', layout=dataset)\n neighbor = self.graph.neighbor\n self.num_scales = num_scales\n \n if dataset == 'LARA':\n A_binary = get_adjacency_matrix(neighbor, 19) \n else:\n A_binary = get_adjacency_matrix(neighbor, 25) \n \n if disentangled_agg:\n A_powers = [k_adjacency(A_binary, k, with_self=True) for k in range(num_scales)]\n A_powers = np.concatenate([normalize_adjacency_matrix(g) for g in A_powers])\n else:\n A_powers = [A_binary + np.eye(len(A_binary)) for k in range(num_scales)]\n A_powers = [normalize_adjacency_matrix(g) for g in A_powers]\n A_powers = [np.linalg.matrix_power(g, k) for k, g in enumerate(A_powers)]\n A_powers = np.concatenate(A_powers)\n\n self.A_powers = torch.Tensor(A_powers)\n self.use_mask = use_mask\n if use_mask:\n # NOTE: the inclusion of residual mask appears to slow down training noticeably\n self.A_res = nn.init.uniform_(nn.Parameter(torch.Tensor(self.A_powers.shape)), -1e-6, 1e-6)\n\n self.mlp = MLP(in_channels * num_scales, [out_channels], dropout=dropout, activation=activation)\n \n def forward(self, x):\n x = x.transpose(2, 3)\n N, C, T, V = x.shape\n self.A_powers = self.A_powers.to(x.device)\n A = self.A_powers.to(x.dtype)\n if self.use_mask:\n A = A + self.A_res.to(x.dtype)\n \n support = torch.einsum('vu,nctu->nctv', A, x)\n support = support.view(N, C, T, self.num_scales, V)\n support = support.permute(0,3,1,2,4).contiguous().view(N, self.num_scales*C, T, V)\n out = self.mlp(support)\n return out"
}
] | from typing import Any, Optional, Tuple
from .tcn import SingleStageTCN
from .SP import MultiScale_GraphConv
import torch
import torch.nn as nn
import copy
import math | 2,681 |
out = self.conv_out(out)
out = self.dropout(out)
return (x + out) * mask
class SFI(nn.Module):
def __init__(self, in_channel, n_features):
super().__init__()
self.conv_s = nn.Conv1d(in_channel, n_features, 1)
self.softmax = nn.Softmax(dim=-1)
self.ff = nn.Sequential(nn.Linear(n_features, n_features),
nn.GELU(),
nn.Dropout(0.3),
nn.Linear(n_features, n_features))
def forward(self, feature_s, feature_t, mask):
feature_s = feature_s.permute(0, 2, 1)
n, c, t = feature_s.shape
feature_s = self.conv_s(feature_s)
map = self.softmax(torch.einsum("nct,ndt->ncd", feature_s, feature_t)/t)
feature_cross = torch.einsum("ncd,ndt->nct", map, feature_t)
feature_cross = feature_cross + feature_t
feature_cross = feature_cross.permute(0, 2, 1)
feature_cross = self.ff(feature_cross).permute(0, 2, 1) + feature_t
return feature_cross * mask
class STI(nn.Module):
def __init__(self, node, in_channel, n_features, out_channel, num_layers, SFI_layer, channel_masking_rate=0.3, alpha=1):
super().__init__()
self.SFI_layer = SFI_layer
num_SFI_layers = len(SFI_layer)
self.channel_masking_rate = channel_masking_rate
self.dropout = nn.Dropout2d(p=channel_masking_rate)
self.conv_in = nn.Conv2d(in_channel, num_SFI_layers+1, kernel_size=1)
self.conv_t = nn.Conv1d(node, n_features, 1)
self.SFI_layers = nn.ModuleList(
[SFI(node, n_features) for i in range(num_SFI_layers)])
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'encoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, mask):
if self.channel_masking_rate > 0:
x = self.dropout(x)
count = 0
x = self.conv_in(x)
feature_s, feature_t = torch.split(x, (len(self.SFI_layers), 1), dim=1)
feature_t = feature_t.squeeze(1).permute(0, 2, 1)
feature_st = self.conv_t(feature_t)
for index, layer in enumerate(self.layers):
if index in self.SFI_layer:
feature_st = self.SFI_layers[count](feature_s[:,count,:], feature_st, mask)
count+=1
feature_st = layer(feature_st, None, mask)
feature_st = self.conv_out(feature_st)
return feature_st * mask
class Decoder(nn.Module):
def __init__(self, in_channel, n_features, out_channel, num_layers, alpha=1):
super().__init__()
self.conv_in = nn.Conv1d(in_channel, n_features, 1)
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'decoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, fencoder, mask):
feature = self.conv_in(x)
for layer in self.layers:
feature = layer(feature, fencoder, mask)
out = self.conv_out(feature)
return out, feature
class Model(nn.Module):
"""
this model predicts both frame-level classes and boundaries.
Args:
in_channel:
n_feature: 64
n_classes: the number of action classes
n_layers: 10
"""
def __init__(
self,
in_channel: int,
n_features: int,
n_classes: int,
n_stages: int,
n_layers: int,
n_refine_layers: int,
n_stages_asb: Optional[int] = None,
n_stages_brb: Optional[int] = None,
SFI_layer: Optional[int] = None,
dataset: str = None,
**kwargs: Any
) -> None:
if not isinstance(n_stages_asb, int):
n_stages_asb = n_stages
if not isinstance(n_stages_brb, int):
n_stages_brb = n_stages
super().__init__()
self.in_channel = in_channel
node = 19 if dataset == "LARA" else 25
|
def exponential_descrease(idx_decoder, p=3):
return math.exp(-p*idx_decoder)
class Linear_Attention(nn.Module):
def __init__(self,
in_channel,
n_features,
out_channel,
n_heads=4,
drop_out=0.05
):
super().__init__()
self.n_heads = n_heads
self.query_projection = nn.Linear(in_channel, n_features)
self.key_projection = nn.Linear(in_channel, n_features)
self.value_projection = nn.Linear(in_channel, n_features)
self.out_projection = nn.Linear(n_features, out_channel)
self.dropout = nn.Dropout(drop_out)
def elu(self, x):
return torch.sigmoid(x)
# return torch.nn.functional.elu(x) + 1
def forward(self, queries, keys, values, mask):
B, L, _ = queries.shape
_, S, _ = keys.shape
queries = self.query_projection(queries).view(B, L, self.n_heads, -1)
keys = self.key_projection(keys).view(B, S, self.n_heads, -1)
values = self.value_projection(values).view(B, S, self.n_heads, -1)
queries = queries.transpose(1, 2)
keys = keys.transpose(1, 2)
values = values.transpose(1, 2)
queries = self.elu(queries)
keys = self.elu(keys)
KV = torch.einsum('...sd,...se->...de', keys, values)
Z = 1.0 / torch.einsum('...sd,...d->...s',queries, keys.sum(dim=-2)+1e-6)
x = torch.einsum('...de,...sd,...s->...se', KV, queries, Z).transpose(1, 2)
x = x.reshape(B, L, -1)
x = self.out_projection(x)
x = self.dropout(x)
return x * mask[:, 0, :, None]
class AttModule(nn.Module):
def __init__(self, dilation, in_channel, out_channel, stage, alpha):
super(AttModule, self).__init__()
self.stage = stage
self.alpha = alpha
self.feed_forward = nn.Sequential(
nn.Conv1d(in_channel, out_channel, 3, padding=dilation, dilation=dilation),
nn.ReLU()
)
self.instance_norm = nn.InstanceNorm1d(out_channel, track_running_stats=False)
self.att_layer = Linear_Attention(out_channel, out_channel, out_channel)
self.conv_out = nn.Conv1d(out_channel, out_channel, 1)
self.dropout = nn.Dropout()
def forward(self, x, f, mask):
out = self.feed_forward(x)
if self.stage == 'encoder':
q = self.instance_norm(out).permute(0, 2, 1)
out = self.alpha * self.att_layer(q, q, q, mask).permute(0, 2, 1) + out
else:
assert f is not None
q = self.instance_norm(out).permute(0, 2, 1)
f = f.permute(0, 2, 1)
out = self.alpha * self.att_layer(q, q, f, mask).permute(0, 2, 1) + out
out = self.conv_out(out)
out = self.dropout(out)
return (x + out) * mask
class SFI(nn.Module):
def __init__(self, in_channel, n_features):
super().__init__()
self.conv_s = nn.Conv1d(in_channel, n_features, 1)
self.softmax = nn.Softmax(dim=-1)
self.ff = nn.Sequential(nn.Linear(n_features, n_features),
nn.GELU(),
nn.Dropout(0.3),
nn.Linear(n_features, n_features))
def forward(self, feature_s, feature_t, mask):
feature_s = feature_s.permute(0, 2, 1)
n, c, t = feature_s.shape
feature_s = self.conv_s(feature_s)
map = self.softmax(torch.einsum("nct,ndt->ncd", feature_s, feature_t)/t)
feature_cross = torch.einsum("ncd,ndt->nct", map, feature_t)
feature_cross = feature_cross + feature_t
feature_cross = feature_cross.permute(0, 2, 1)
feature_cross = self.ff(feature_cross).permute(0, 2, 1) + feature_t
return feature_cross * mask
class STI(nn.Module):
def __init__(self, node, in_channel, n_features, out_channel, num_layers, SFI_layer, channel_masking_rate=0.3, alpha=1):
super().__init__()
self.SFI_layer = SFI_layer
num_SFI_layers = len(SFI_layer)
self.channel_masking_rate = channel_masking_rate
self.dropout = nn.Dropout2d(p=channel_masking_rate)
self.conv_in = nn.Conv2d(in_channel, num_SFI_layers+1, kernel_size=1)
self.conv_t = nn.Conv1d(node, n_features, 1)
self.SFI_layers = nn.ModuleList(
[SFI(node, n_features) for i in range(num_SFI_layers)])
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'encoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, mask):
if self.channel_masking_rate > 0:
x = self.dropout(x)
count = 0
x = self.conv_in(x)
feature_s, feature_t = torch.split(x, (len(self.SFI_layers), 1), dim=1)
feature_t = feature_t.squeeze(1).permute(0, 2, 1)
feature_st = self.conv_t(feature_t)
for index, layer in enumerate(self.layers):
if index in self.SFI_layer:
feature_st = self.SFI_layers[count](feature_s[:,count,:], feature_st, mask)
count+=1
feature_st = layer(feature_st, None, mask)
feature_st = self.conv_out(feature_st)
return feature_st * mask
class Decoder(nn.Module):
def __init__(self, in_channel, n_features, out_channel, num_layers, alpha=1):
super().__init__()
self.conv_in = nn.Conv1d(in_channel, n_features, 1)
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'decoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, fencoder, mask):
feature = self.conv_in(x)
for layer in self.layers:
feature = layer(feature, fencoder, mask)
out = self.conv_out(feature)
return out, feature
class Model(nn.Module):
"""
this model predicts both frame-level classes and boundaries.
Args:
in_channel:
n_feature: 64
n_classes: the number of action classes
n_layers: 10
"""
def __init__(
self,
in_channel: int,
n_features: int,
n_classes: int,
n_stages: int,
n_layers: int,
n_refine_layers: int,
n_stages_asb: Optional[int] = None,
n_stages_brb: Optional[int] = None,
SFI_layer: Optional[int] = None,
dataset: str = None,
**kwargs: Any
) -> None:
if not isinstance(n_stages_asb, int):
n_stages_asb = n_stages
if not isinstance(n_stages_brb, int):
n_stages_brb = n_stages
super().__init__()
self.in_channel = in_channel
node = 19 if dataset == "LARA" else 25
| self.SP = MultiScale_GraphConv(13, in_channel, n_features, dataset) | 1 | 2023-12-12 02:27:15+00:00 | 4k |
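As a minimal standalone sketch of the linearized attention computed in Linear_Attention above, rewritten with explicit batch/head axes (shapes are illustrative; the sigmoid feature map mirrors the record's elu method):

import torch

def linear_attention(q, k, v, eps=1e-6):
    # q, k, v: (batch, heads, length, dim); sigmoid keeps the features positive
    q, k = torch.sigmoid(q), torch.sigmoid(k)
    kv = torch.einsum('bhsd,bhse->bhde', k, v)                        # sum_s k_s outer v_s
    z = 1.0 / (torch.einsum('bhld,bhd->bhl', q, k.sum(dim=2)) + eps)  # per-query normalizer
    return torch.einsum('bhde,bhld,bhl->bhle', kv, q, z)              # no L x L attention map

q = torch.randn(2, 4, 128, 16)
k = torch.randn(2, 4, 128, 16)
v = torch.randn(2, 4, 128, 16)
print(linear_attention(q, k, v).shape)  # torch.Size([2, 4, 128, 16])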
bolna-ai/bolna | local_setup/demo_server.py | [
{
"identifier": "AssistantManager",
"path": "bolna/agent_manager/assistant_manager.py",
"snippet": "class AssistantManager(BaseManager):\n def __init__(self, agent_config, ws, context_data=None, user_id=None, assistant_id=None,\n connected_through_dashboard=None, cache = None):\n super().__init__()\n self.tools = {}\n self.websocket = ws\n self.agent_config = agent_config\n self.context_data = context_data\n self.tasks = agent_config.get('tasks', [])\n self.task_states = [False] * len(self.tasks)\n self.user_id = user_id\n self.assistant_id = assistant_id\n self.run_id = f\"{self.assistant_id}#{str(int(time.time() * 1000))}\"\n self.connected_through_dashboard = connected_through_dashboard\n self.cache = cache\n \n @staticmethod\n def find_llm_output_price(outputs):\n num_token = 0\n for op in outputs:\n num_token += len(enc.encode(str(op)))\n return 0.0020 * num_token\n\n @staticmethod\n def find_llm_input_token_price(messages):\n total_str = []\n this_run = ''\n prev_run = ''\n num_token = 0\n for message in messages:\n if message['role'] == 'system':\n this_run += message['content']\n\n if message['role'] == 'user':\n this_run += message['content']\n\n if message['role'] == 'assistant':\n num_token += len(enc.encode(str(this_run)))\n this_run += message['content']\n\n return 0.0010 * num_token\n\n async def _save_meta(self, call_sid, stream_sid, messages, transcriber_characters, synthesizer_characters,\n label_flow):\n logger.info(f\"call sid {call_sid}, stream_sid {stream_sid}\")\n # transcriber_cost = time * 0.0043/ 60\n # telephony_cost = cost\n # llm_cost = input_tokens * price + output_tokens * price\n # tts_cost = 0 for now\n # if polly - characters * 16/1000000\n # input_tokens, output_tokens\n\n call_meta = dict()\n call = client.calls(call_sid).fetch()\n call_meta[\"telephony_cost\"] = call.price\n call_meta[\"duration\"] = call.duration\n call_meta[\"transcriber_cost\"] = int(call.duration) * (0.0043 / 60)\n call_meta[\"to_number\"] = call.to_formatted\n recording = client.recordings.list(call_sid=call_sid)[0]\n call_meta[\"recording_url\"] = recordings.media_url\n call_meta[\"tts_cost\"] = 0 if self.tasks[0]['tools_config']['synthesizer']['model'] != \"polly\" else (\n synthesizer_characters * 16 / 1000000)\n call_meta[\"llm_cost\"] = self.find_llm_input_token_price(messages) + self.find_llm_output_token_price(label_flow)\n logger.info(f\"Saving call meta {call_meta}\")\n await self.dynamodb.store_run(self.user_id, self.assistant_id, self.run_id, call_meta)\n\n async def download_record_from_twilio_and_save_to_s3(self, recording_url):\n response = requests.get(recording_url, auth=(account_sid, auth_token))\n if response.status_code == 200:\n bucket_name = 'bolna/'\n object_key = 'user_id/agent_id/run_id.mp3'\n\n # Upload the downloaded MP3 file to S3\n s3.put_object(Bucket=bucket_name, Key=object_key, Body=response.content)\n print(\"MP3 file uploaded to S3 successfully!\")\n\n async def run(self, is_local=False):\n '''\n Run will start all tasks in sequential format\n '''\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n input_parameters = None\n for task_id, task in enumerate(self.tasks):\n task_manager = TaskManager(self.agent_config[\"assistant_name\"], task_id, task, self.websocket,\n context_data=self.context_data, input_parameters=input_parameters,\n user_id=self.user_id, assistant_id=self.assistant_id, run_id=self.run_id, connected_through_dashboard = self.connected_through_dashboard, cache = self.cache)\n await task_manager.load_prompt(self.agent_config[\"assistant_name\"], task_id, is_local=is_local)\n task_output = await task_manager.run()\n 
task_output['run_id'] = self.run_id\n yield task_id, task_output\n self.task_states[task_id] = True\n if task_id == 0:\n input_parameters = task_output\n logger.info(\"Done with execution of the agent\")"
},
{
"identifier": "configure_logger",
"path": "bolna/helpers/logger_config.py",
"snippet": "def configure_logger(file_name, enabled=True, logging_level='INFO'):\n if logging_level not in VALID_LOGGING_LEVELS:\n logging_level = \"INFO\"\n\n logging.basicConfig(\n level=logging_level,\n format=\"%(asctime)s.%(msecs)03d %(levelname)s {%(module)s} [%(funcName)s] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n logger = logging.getLogger(file_name)\n\n if not enabled:\n logger.disabled = True\n return logger"
},
{
"identifier": "AssistantModel",
"path": "bolna/models.py",
"snippet": "class AssistantModel(BaseModel):\n assistant_name: str\n assistant_type: str = \"other\"\n tasks: List[TaskConfigModel]"
}
] | import os
import asyncio
import json
import uuid
import traceback
import redis.asyncio as redis
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from typing import List
from dotenv import load_dotenv
from bolna.agent_manager import AssistantManager
from bolna.helpers.logger_config import configure_logger
from bolna.models import AssistantModel | 1,634 |
logger = configure_logger(__name__)
load_dotenv()
redis_pool = redis.ConnectionPool.from_url(os.getenv('REDIS_URL'), decode_responses=True)
redis_client = redis.Redis.from_pool(redis_pool)
active_websockets: List[WebSocket] = []
app = FastAPI()
@app.post("/create_agent")
async def create_agent(agent_data: AssistantModel):
agent_uuid = '{}'.format(str(uuid.uuid4()))
redis_task = asyncio.create_task(redis_client.set(agent_uuid, agent_data.json()))
await asyncio.gather(redis_task)
return {"agent_id": "{}".format(agent_uuid), "state": "created"}
@app.websocket("/chat/v1/{user_id}/{agent_id}")
async def websocket_endpoint(agent_id: str, user_id: str, websocket: WebSocket):
logger.info('ws connected with user_id: {} and agent_id: {}'.format(user_id, agent_id))
await websocket.accept()
active_websockets.append(websocket)
agent_config, context_data = None, None
try:
retrieved_agent_config, retrieved_context_data = await redis_client.mget([agent_id, user_id])
agent_config, context_data = json.loads(retrieved_agent_config), json.loads(retrieved_context_data)
except Exception as e:
raise HTTPException(status_code=404, detail="Agent not found")
is_local = True
|
logger = configure_logger(__name__)
load_dotenv()
redis_pool = redis.ConnectionPool.from_url(os.getenv('REDIS_URL'), decode_responses=True)
redis_client = redis.Redis.from_pool(redis_pool)
active_websockets: List[WebSocket] = []
app = FastAPI()
@app.post("/create_agent")
async def create_agent(agent_data: AssistantModel):
agent_uuid = '{}'.format(str(uuid.uuid4()))
redis_task = asyncio.create_task(redis_client.set(agent_uuid, agent_data.json()))
await asyncio.gather(redis_task)
return {"agent_id": "{}".format(agent_uuid), "state": "created"}
@app.websocket("/chat/v1/{user_id}/{agent_id}")
async def websocket_endpoint(agent_id: str, user_id: str, websocket: WebSocket):
logger.info('ws connected with user_id: {} and agent_id: {}'.format(user_id, agent_id))
await websocket.accept()
active_websockets.append(websocket)
agent_config, context_data = None, None
try:
retrieved_agent_config, retrieved_context_data = await redis_client.mget([agent_id, user_id])
agent_config, context_data = json.loads(retrieved_agent_config), json.loads(retrieved_context_data)
except Exception as e:
raise HTTPException(status_code=404, detail="Agent not found")
is_local = True | agent_manager = AssistantManager(agent_config, websocket, context_data, user_id, agent_id) | 0 | 2023-12-13 09:07:35+00:00 | 4k |
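As a minimal standalone sketch of the Redis set/mget round trip this demo server relies on for agent and user configuration (the key names, payloads and local Redis URL are made up):

import asyncio
import json
import redis.asyncio as redis

async def demo():
    client = redis.Redis.from_url("redis://localhost:6379", decode_responses=True)
    await client.set("agent-123", json.dumps({"assistant_name": "demo", "tasks": []}))
    await client.set("user-1", json.dumps({"recipient": "Alice"}))
    agent_raw, context_raw = await client.mget(["agent-123", "user-1"])
    return json.loads(agent_raw), json.loads(context_raw)

# asyncio.run(demo())  # requires a Redis server listening on localhost:6379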
relari-ai/continuous-eval | tests/evaluator_test.py | [
{
"identifier": "Dataset",
"path": "continuous_eval/dataset.py",
"snippet": "class Dataset(pd.DataFrame):\n def __init__(self, data=None, index=None, columns=None, copy=False):\n super().__init__(data=data, index=index, columns=columns, copy=copy)\n self.validate()\n\n def iterate(self):\n for _, row in self.iterrows():\n yield row.to_dict()\n\n def datum(self, index):\n return self.iloc[index].to_dict()\n\n def to_dict(self, *args, **kwargs):\n if \"orient\" not in kwargs:\n kwargs[\"orient\"] = \"records\"\n return super().to_dict(*args, **kwargs)\n\n def validate(self):\n if len(self) == 0:\n raise ValueError(\"Dataset is empty\")\n if not \"question\" in self.columns:\n raise ValueError(\"The dataset should at least question column not found\")\n if not any(\n [\n all([col.value in self.columns for col in required_columns])\n for required_columns in _MINIMAL_REQUIRED_COLUMNS\n ]\n ):\n raise ValueError(\n \"The dataset should at least have one of the following columns: {}\".format(_MINIMAL_REQUIRED_COLUMNS)\n )\n\n for item in self.values:\n if DatumField.QUESTION.value in self.columns:\n itm = item[self.columns.get_loc(DatumField.QUESTION.value)]\n if not isinstance(itm, str):\n raise ValueError(\"Answer must be a string\")\n if DatumField.ANSWER.value in self.columns:\n itm = item[self.columns.get_loc(DatumField.ANSWER.value)]\n if not isinstance(itm, str):\n raise ValueError(\"Answer must be a string\")\n if DatumField.GROUND_TRUTH_ANSWER.value in self.columns:\n itm = item[self.columns.get_loc(DatumField.GROUND_TRUTH_ANSWER.value)]\n if not isinstance(itm, list):\n raise ValueError(\"Ground truth answers must be a list of strings\")\n for answer in itm:\n if not isinstance(answer, str):\n raise ValueError(\"Ground truth answers must be a list of strings\")\n if DatumField.RETRIEVED_CONTEXTS.value in self.columns:\n itm = item[self.columns.get_loc(DatumField.RETRIEVED_CONTEXTS.value)]\n if isinstance(itm, list):\n for ctx in itm:\n if not isinstance(ctx, str):\n raise ValueError(\"Retrieved context must be a list of strings or a string\")\n elif not isinstance(itm, str):\n raise ValueError(\"Retrieved context must be a list of strings or a string\")\n if DatumField.GROUND_TRUTH_CONTEXTS.value in self.columns:\n itm = item[self.columns.get_loc(DatumField.GROUND_TRUTH_CONTEXTS.value)]\n if not isinstance(itm, list):\n raise ValueError(\"Ground truth context must be a list of strings\")\n for answer in itm:\n if not isinstance(answer, str):\n raise ValueError(\"Ground truth context must be a list of strings\")\n\n @classmethod\n def from_jsonl(cls, path: Union[str, Path]):\n with open(path, \"r\") as f:\n data = [json.loads(line) for line in f.readlines()]\n return cls(data)\n\n def to_jsonl(self, path: Union[str, Path]):\n with open(path, \"w\") as f:\n f.write(self.to_json(orient=\"records\", lines=True))"
},
{
"identifier": "GenerationEvaluator",
"path": "continuous_eval/evaluators/generation_evaluator.py",
"snippet": "class GenerationEvaluator(BaseEvaluator):\n def __init__(\n self,\n dataset: Union[Dataset, pd.DataFrame],\n metrics: List[Metric] = [DeterministicFaithfulness()],\n ):\n super().__init__(dataset=dataset, metrics=metrics)\n\n def run(self, batch_size: int = 32, quiet: bool = False):\n batches = self._get_batches(batch_size=batch_size)\n results = {id(metric): list() for metric in self.metrics}\n\n pbar = tqdm(total=len(self.dataset), desc=\"Processing\", disable=quiet)\n for batch in batches:\n for metric in self.metrics:\n results[id(metric)].extend(metric.batch_calculate(batch))\n pbar.update(len(batch))\n pbar.close()\n\n # metrics = {id(metric): metric.batch_calculate(data) for metric in self.metrics}\n self._results = [dict(ChainMap(*x)) for x in zip(*results.values())]\n return self._results"
},
{
"identifier": "RetrievalEvaluator",
"path": "continuous_eval/evaluators/retrieval_evaluator.py",
"snippet": "class RetrievalEvaluator(BaseEvaluator):\n def __init__(\n self,\n dataset: Dataset,\n metrics: List[Metric],\n ):\n super().__init__(dataset=dataset, metrics=metrics)\n\n def run(\n self,\n k: int = None,\n batch_size: Optional[Union[int, float]] = 32,\n quiet: bool = False,\n ):\n assert k is None or isinstance(k, int) and k > 0, \"K must be a positive integer or None.\"\n\n batches = self._get_batches(batch_size=batch_size)\n results = {id(metric): list() for metric in self.metrics}\n\n pbar = tqdm(total=len(self.dataset), desc=\"Processing\", disable=quiet)\n for batch in batches:\n batch = self._preprocess_batch(batch, k)\n for metric in self.metrics:\n results[id(metric)].extend(metric.batch_calculate(batch))\n pbar.update(len(batch))\n pbar.close()\n\n self._results = [dict(ChainMap(*x)) for x in zip(*results.values())]\n return self._results\n\n def _preprocess_batch(self, batch, k):\n if k is None:\n return batch\n if k is not None:\n # Filer out the retrieved contexts\n batch = batch.copy()\n for datum in batch:\n if len(datum[\"retrieved_contexts\"]) >= k:\n datum[\"retrieved_contexts\"] = datum[\"retrieved_contexts\"][:k]\n return batch"
},
{
"identifier": "DeterministicAnswerCorrectness",
"path": "continuous_eval/metrics/generation_deterministic_metrics.py",
"snippet": "class DeterministicAnswerCorrectness(Metric):\n def calculate(self, answer, ground_truths, **kwargs):\n # calculate the max score across all ground truth answers\n token_scores = [TokenOverlap().calculate(answer, gt_answer) for gt_answer in ground_truths]\n rouge_scores = [RougeScore().calculate(answer, gt_answer) for gt_answer in ground_truths]\n bleu_scores = [BleuScore().calculate(answer, gt_answer) for gt_answer in ground_truths]\n\n return {\n metric: max(score.get(metric, 0) for score in token_scores + rouge_scores + bleu_scores)\n for metric in [\"rouge_l_recall\", \"rouge_l_precision\", \"rouge_l_f1\", \"token_overlap_recall\", \"token_overlap_precision\", \"token_overlap_f1\", \"bleu_score\"]\n }"
},
{
"identifier": "DummyMetric",
"path": "tests/helpers/dummy_metric.py",
"snippet": "class DummyMetric(Metric):\n def __init__(self, result_keys: Set[str]):\n self._result_keys = result_keys\n\n def calculate(self, **kwargs):\n return {k: random() for k in self._result_keys}"
}
] | import tempfile
import pandas as pd
import pytest
from continuous_eval.dataset import Dataset
from continuous_eval.evaluators import GenerationEvaluator, RetrievalEvaluator
from continuous_eval.metrics import DeterministicAnswerCorrectness
from tests.helpers.dummy_metric import DummyMetric | 1,797 |
retrieval_dataset = Dataset.from_jsonl("tests/data/retrieval_sm.jsonl")
generation_dataset = Dataset.from_jsonl("tests/data/correctness_sm.jsonl")
def test_retieval_evaluator():
expected_keys = {"precision", "NDCG", "recall"}
|
retrieval_dataset = Dataset.from_jsonl("tests/data/retrieval_sm.jsonl")
generation_dataset = Dataset.from_jsonl("tests/data/correctness_sm.jsonl")
def test_retieval_evaluator():
expected_keys = {"precision", "NDCG", "recall"}
| evaluator = RetrievalEvaluator( | 2 | 2023-12-08 21:30:39+00:00 | 4k |
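As a minimal standalone sketch of how the evaluators above merge per-metric result lists into one dict per datum via zip and ChainMap (metric names and values are made up):

from collections import ChainMap

# one list of per-datum result dicts for each metric
results = {
    'metric_a': [{'precision': 0.5, 'recall': 0.4}, {'precision': 0.7, 'recall': 0.9}],
    'metric_b': [{'rouge_l_f1': 0.3}, {'rouge_l_f1': 0.6}],
}

# zip pairs the i-th result of every metric; ChainMap merges each pair into one dict
merged = [dict(ChainMap(*per_datum)) for per_datum in zip(*results.values())]
print(merged)
# [{'rouge_l_f1': 0.3, 'precision': 0.5, 'recall': 0.4},
#  {'rouge_l_f1': 0.6, 'precision': 0.7, 'recall': 0.9}]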
ryanhe312/STSSNet-AAAI2024 | train.py | [
{
"identifier": "STSSNet",
"path": "model.py",
"snippet": "class STSSNet(nn.Module):\n def __init__(self, in_ch, out_ch, feat_ch, his_ch, skip=True):\n super(STSSNet, self).__init__()\n self.skip = skip\n\n self.convHis1 = nn.Sequential(\n nn.Conv2d(his_ch, 24, kernel_size=3, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True)\n )\n self.convHis2 = nn.Sequential(\n nn.Conv2d(24, 32, kernel_size=3, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True)\n )\n self.convHis3 = nn.Sequential(\n nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True)\n )\n \n self.latentEncoder = nn.Sequential(\n nn.Conv2d(32+feat_ch, 64, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size = 3, stride = 1, dilation = 1, padding = 1, bias=True)\n )\n self.KEncoder = nn.Sequential(\n nn.Conv2d(feat_ch, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size = 3, stride = 1, dilation = 1, padding = 1, bias=True)\n )\n\n self.lowlevelGated = LWGatedConv2D(32*3, 32, kernel_size=3, stride=1, pad=1)\n\n self.conv1 = LWGatedConv2D(in_ch+in_ch+feat_ch, 24, kernel_size=3, stride=1, pad=1)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = LWGatedConv2D(24, 24, kernel_size=3, stride=1, pad=1)\n self.relu2 = nn.ReLU(inplace=True)\n self.down1 = DownLWGated(24, 24)\n self.down2 = DownLWGated(24, 32)\n self.down3 = DownLWGated(32, 32)\n \n self.up1 = Up(96+32, 32)\n self.up2 = Up(56, 24)\n self.up3 = Up(48, 24)\n self.outc = nn.Conv2d(24, out_ch*4, kernel_size=1)\n self.outfinal = nn.PixelShuffle(2)\n \n def hole_inpaint(self, x, mask, feature):\n x_down = x\n mask_down = F.interpolate(mask,scale_factor=0.125,mode='bilinear')\n feature_down = F.interpolate(feature,scale_factor=0.125,mode='bilinear')\n\n latent_code = self.latentEncoder(torch.cat([x_down,feature_down], dim=1)) * mask_down\n K_map = F.normalize(self.KEncoder(feature_down), p=2, dim=1)\n \n b,c,h,w = list(K_map.size())\n md = 2\n f1 = F.unfold(K_map*mask_down, kernel_size=(2*md+1, 2*md+1), padding=(md, md), stride=(1, 1))\n f1 = f1.view([b, c, -1, h, w])\n f2 = K_map.view([b, c, 1, h, w])\n weight_k = torch.relu((f1*f2).sum(dim=1, keepdim=True))\n \n b,c,h,w = list(latent_code.size())\n v = F.unfold(latent_code, kernel_size=(2*md+1, 2*md+1), padding=(md, md), stride=(1, 1))\n v = v.view([b, c, -1, h, w])\n \n agg_latent = (v * weight_k).sum(dim=2)/(weight_k.sum(dim=2).clamp_min(1e-6))\n return agg_latent\n \n def forward(self, x, feature, mask, hisBuffer):\n\n hisBuffer = hisBuffer.reshape(-1, 4, hisBuffer.shape[-2], hisBuffer.shape[-1])\n\n hisDown1 = self.convHis1(hisBuffer)\n hisDown2 = self.convHis2(hisDown1)\n hisDown3 = self.convHis3(hisDown2)\n cathisDown3 = hisDown3.reshape(-1, 3*32, hisDown3.shape[-2], hisDown3.shape[-1]) # 64\n\n motionFeature = self.lowlevelGated(cathisDown3)\n \n x1 = torch.cat([x, x*mask, feature],dim=1)\n x1 = self.conv1(x1)\n x1 = 
self.relu1(x1)\n x1 = self.conv2(x1)\n x1 = self.relu2(x1)\n\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n\n inpaint_feat = self.hole_inpaint(x4, mask, feature)\n x4 = torch.cat([inpaint_feat, motionFeature], dim=1)\n\n res = self.up1(x4, x3)\n res= self.up2(res, x2)\n res= self.up3(res, x1)\n logits = self.outc(res)\n logits = self.outfinal(logits)\n\n if self.skip:\n x1, x2 = x.chunk(2,dim=1)\n x_up = F.interpolate(x1,scale_factor=2,mode='bilinear')\n logits = logits + x_up\n\n return logits"
},
{
"identifier": "metrics",
"path": "utils/metrics.py",
"snippet": "class cvtColor:\n def __init__(self) -> None:\n def rgb2ycbcr(self, tensor):\n def ycrcb2rgb(self, tensor):\ndef accuracy(output, target):\ndef top_k_acc(output, target, k=3):\ndef mse(output, target):\ndef psnr(output, target, only_y=False):\ndef ssim(output, target, only_y=False):\n R = tensor[:,0:1]\n G = tensor[:,1:2]\n B = tensor[:,2:3]\n Y = self.rgb2ycbcr_coeffs[0] * R + self.rgb2ycbcr_coeffs[1] * G + self.rgb2ycbcr_coeffs[2] * B + self.rgb2ycbcr_coeffs[3]\n Y = tensor[:,0:1]\n R = self.ycbcr2rgb_coeffs[0] * Y + self.ycbcr2rgb_coeffs[1] * Cb + self.ycbcr2rgb_coeffs[2] * Cr + self.ycbcr2rgb_coeffs[3]\n G = self.ycbcr2rgb_coeffs[4] * Y + self.ycbcr2rgb_coeffs[5] * Cb + self.ycbcr2rgb_coeffs[6] * Cr + self.ycbcr2rgb_coeffs[7]\n B = self.ycbcr2rgb_coeffs[8] * Y + self.ycbcr2rgb_coeffs[9] * Cb + self.ycbcr2rgb_coeffs[10] * Cr + self.ycbcr2rgb_coeffs[11]"
}
] | import os
import time
import torch
import lpips
import torchvision as tv
import torch.nn.functional as F
import torch.utils.data as data
from torch import optim
from torch.cuda import amp
from visdom import Visdom
from model import STSSNet
from tqdm.auto import tqdm
from dataloaders import *
from utils import metrics | 2,328 |
mdevice=torch.device("cuda:0")
learningrate=1e-4
epoch=100
printevery=50
batch_size=2
class VisdomWriter:
def __init__(self, visdom_port):
self.viz = Visdom(port=visdom_port)
self.names = []
def add_scalar(self, name, val, step):
try:
val = val.item()
except:
val = float(val)
if name not in self.names:
self.names.append(name)
self.viz.line([val], [step], win=name, opts=dict(title=name))
else:
self.viz.line([val], [step], win=name, update='append')
def add_image(self, name, image, step):
self.viz.image(image, win=name, opts=dict(title=name))
def close(self):
return
def colornorm(img):
img = img.clamp(0,1)
return img
def train(dataLoaderIns, modelSavePath, save_dir, reload=None, port=2336):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
vgg_model = lpips.LPIPS(net='vgg').cuda()
|
mdevice=torch.device("cuda:0")
learningrate=1e-4
epoch=100
printevery=50
batch_size=2
class VisdomWriter:
def __init__(self, visdom_port):
self.viz = Visdom(port=visdom_port)
self.names = []
def add_scalar(self, name, val, step):
try:
val = val.item()
except:
val = float(val)
if name not in self.names:
self.names.append(name)
self.viz.line([val], [step], win=name, opts=dict(title=name))
else:
self.viz.line([val], [step], win=name, update='append')
def add_image(self, name, image, step):
self.viz.image(image, win=name, opts=dict(title=name))
def close(self):
return
def colornorm(img):
img = img.clamp(0,1)
return img
def train(dataLoaderIns, modelSavePath, save_dir, reload=None, port=2336):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
vgg_model = lpips.LPIPS(net='vgg').cuda()
| model = STSSNet(6,3,9,4) | 0 | 2023-12-10 02:02:37+00:00 | 4k |
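As a minimal standalone sketch of the 1x1-conv plus PixelShuffle head that STSSNet's outc/outfinal layers use to produce a 2x-resolution output (channel counts and the input resolution here are illustrative):

import torch
import torch.nn as nn

out_ch = 3
head = nn.Sequential(
    nn.Conv2d(24, out_ch * 4, kernel_size=1),  # predict 4x the output channels at low resolution
    nn.PixelShuffle(2),                        # rearrange (C*4, H, W) -> (C, 2H, 2W)
)

feat = torch.randn(1, 24, 135, 240)
print(head(feat).shape)  # torch.Size([1, 3, 270, 480])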
Seunggu0305/VLCounter | tools/test_carpk.py | [
{
"identifier": "save_density_map",
"path": "tools/util.py",
"snippet": "def save_density_map(query_img, pred_D,attn, GT_D,output_dir, fname='results.png', class_chosen=None, pred_cnt=None):\n\n if query_img is not None:\n _,h,w = query_img.shape\n query_img = query_img.cpu().numpy()\n query_img = 255.0 * (query_img - np.min(query_img) + 1e-10) / (1e-10 + np.max(query_img) - np.min(query_img))\n query_img = query_img.squeeze()\n query_img = query_img.transpose(1,2,0)\n query_img = cv2.cvtColor(query_img,cv2.COLOR_BGR2RGB)\n \n if pred_D is not None:\n pred_D = pred_D.cpu().detach().numpy()\n if pred_cnt is None:\n pred_cnt = np.sum(pred_D)\n pred_D = 255.0 * (pred_D - np.min(pred_D) + 1e-10) / (1e-10 + np.max(pred_D) - np.min(pred_D))\n pred_D = pred_D.squeeze()\n pred_D = cv2.applyColorMap(pred_D[:, :, np.newaxis].astype(np.uint8).repeat(3, axis=2), cv2.COLORMAP_JET)\n \n if attn is not None:\n attn = attn.cpu().detach().numpy()\n attn = 255.0 * (attn - np.min(attn) + 1e-10) / (1e-10 + np.max(attn) - np.min(attn))\n attn = attn.squeeze()\n attn = cv2.applyColorMap(attn[:, :, np.newaxis].astype(np.uint8).repeat(3, axis=2), cv2.COLORMAP_JET)\n\n if GT_D is not None:\n GT_D = GT_D.cpu().detach().numpy()\n gt_cnt = np.sum(GT_D)\n GT_D = 255.0 * (GT_D - np.min(GT_D) + 1e-10) / (1e-10 + np.max(GT_D) - np.min(GT_D))\n GT_D = GT_D.squeeze()\n GT_D = cv2.applyColorMap(GT_D[:, :, np.newaxis].astype(np.uint8).repeat(3, axis=2), cv2.COLORMAP_JET) \n\n \n # h,w = pred_D.shape[:2]\n # cv2.putText(query_img,class_chosen,(0,20),cv2.FONT_HERSHEY_PLAIN, 2.0, (0,0,0), 1)\n # if pred_D is not None:\n # cv2.putText(pred_D,\"Den Predict\", (0,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n # cv2.putText(pred_D,str(pred_cnt), (0,h-3), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n # if GT_D is not None:\n # cv2.putText(GT_D,\"Den GT\", (0,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n # cv2.putText(GT_D,str(gt_cnt), (0,h-3), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n\n query_result = np.hstack((query_img,pred_D,attn,GT_D))\n\n cv2.imwrite(os.path.join(output_dir,'{}.jpg'.format(fname)), query_result)"
},
{
"identifier": "save_density_map_carpk",
"path": "tools/util.py",
"snippet": "def save_density_map_carpk(query_img, pred_D,attn,cnt_err,output_dir, fname='results.png', class_chosen=None, pred_cnt=None):\n\n if query_img is not None:\n _,h,w = query_img.shape\n query_img = query_img.cpu().numpy()\n query_img = 255.0 * (query_img - np.min(query_img) + 1e-10) / (1e-10 + np.max(query_img) - np.min(query_img))\n query_img = query_img.squeeze()\n query_img = query_img.transpose(1,2,0)\n query_img = cv2.cvtColor(query_img,cv2.COLOR_BGR2RGB)\n \n if pred_D is not None:\n pred_D = pred_D.cpu().detach().numpy()\n if pred_cnt is None:\n pred_cnt = np.sum(pred_D)\n pred_D = 255.0 * (pred_D - np.min(pred_D) + 1e-10) / (1e-10 + np.max(pred_D) - np.min(pred_D))\n pred_D = pred_D.squeeze()\n pred_D = cv2.applyColorMap(pred_D[:, :, np.newaxis].astype(np.uint8).repeat(3, axis=2), cv2.COLORMAP_JET)\n \n if attn is not None:\n attn = attn.cpu().detach().numpy()\n attn = 255.0 * (attn - np.min(attn) + 1e-10) / (1e-10 + np.max(attn) - np.min(attn))\n attn = attn.squeeze()\n attn = cv2.applyColorMap(attn[:, :, np.newaxis].astype(np.uint8).repeat(3, axis=2), cv2.COLORMAP_JET)\n\n # h,w = pred_D.shape[:2]\n # cv2.putText(query_img,class_chosen,(0,20),cv2.FONT_HERSHEY_PLAIN, 2.0, (0,0,0), 1)\n # if query_img is not None:\n # cv2.putText(query_img,\"Den Predict\", (0,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n # cv2.putText(query_img,str(cnt_err), (0,h-3), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n\n # if pred_D is not None:\n # cv2.putText(pred_D,\"Den Predict\", (0,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n # cv2.putText(pred_D,str(pred_cnt), (0,h-3), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 1)\n\n query_result = np.hstack((query_img,pred_D,attn))\n\n cv2.imwrite(os.path.join(output_dir,'{}.jpg'.format(fname)), query_result)"
},
{
"identifier": "get_model_dir",
"path": "tools/util.py",
"snippet": "def get_model_dir(args: argparse.Namespace) -> str:\n \"\"\"\n Obtain the directory to save/load the model\n \"\"\"\n path = os.path.join(\n args.MODEL.model_dir,\n args.DATA.train_name,\n f'exp_{args.exp}'\n )\n return path"
},
{
"identifier": "get_model_dir_carpk",
"path": "tools/util.py",
"snippet": "def get_model_dir_carpk(args: argparse.Namespace) -> str:\n \"\"\"\n Obtain the directory to save/load the model\n \"\"\"\n path = os.path.join(\n args.MODEL.model_dir,\n args.DATA.train_name,\n f'exp_{args.exp}',\n 'inference_carpk'\n )\n return path"
}
] | import os
import torch
import torch.nn as nn
import numpy as np
import argparse
import time
import random
import yaml
import scipy.ndimage as ndimage
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import hub
from dotmap import DotMap
from torch.utils.tensorboard import SummaryWriter
from .models.Counter_vit_tc_unet_info import Counter
from .util import save_density_map, save_density_map_carpk, get_model_dir, get_model_dir_carpk
from PIL import Image
from torchvision import transforms
from .tokenizer import tokenize | 2,399 |
# from .models.Counter_vit_af_tc_info_unet_v4 import Counter
def parse_args() -> DotMap:
parser = argparse.ArgumentParser(description='Zero Shot Object Counting')
parser.add_argument('--config', type=str, required=True, help='config file')
parser.add_argument('--gpus', type=lambda s: [int(item) for item in s.split(',')], required=True, help='gpu ids')
parser.add_argument('--enc', type=str, required=True, help='LIT encoder setting')
parser.add_argument('--prompt', type=str, required=True, help='num of prompt')
parser.add_argument('--ckpt_used', type=str, required=True, help='best checkpoint')
parser.add_argument('--exp', type=int, required=True, help='exp')
parsed = parser.parse_args()
assert parsed.config is not None
with open(parsed.config, 'r') as f:
config = yaml.safe_load(f)
args = DotMap(config)
args.config = parsed.config
args.gpus = parsed.gpus
args.enc = parsed.enc
args.prompt = parsed.prompt
args.EVALUATION.ckpt_used = parsed.ckpt_used
args.exp = parsed.exp
if args.enc == 'res101':
args.MODEL.pretrain = '/workspace/YESCUTMIX/pretrain/RN101.pt'
return args
def main(args):
local_rank = args.local_rank
if args.TRAIN.manual_seed is not None:
cudnn.benchmark = False
cudnn.deterministic = True
torch.cuda.manual_seed(args.TRAIN.manual_seed)
np.random.seed(args.TRAIN.manual_seed)
torch.manual_seed(args.TRAIN.manual_seed)
torch.cuda.manual_seed_all(args.TRAIN.manual_seed)
random.seed(args.TRAIN.manual_seed)
model = Counter(args).cuda()
root_model = get_model_dir(args)
if args.EVALUATION.ckpt_used is not None:
filepath = os.path.join(root_model, f'{args.EVALUATION.ckpt_used}.pth')
assert os.path.isfile(filepath), filepath
print("=> loading model weight '{}'".format(filepath),flush=True)
checkpoint = torch.load(filepath)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded model weight '{}'".format(filepath),flush=True)
else:
print("=> Not loading anything",flush=True)
# test_loader = get_val_loader(args,mode='test')
ds_test = hub.load("hub://activeloop/carpk-test")
#dataloader_train = ds_train.pytorch(num_workers=args.num_workers, batch_size=1, shuffle=False)
test_loader = ds_test.pytorch(num_workers=args.DATA.workers, batch_size=1, shuffle=False)
|
# from .models.Counter_vit_af_tc_info_unet_v4 import Counter
def parse_args() -> DotMap:
parser = argparse.ArgumentParser(description='Zero Shot Object Counting')
parser.add_argument('--config', type=str, required=True, help='config file')
parser.add_argument('--gpus', type=lambda s: [int(item) for item in s.split(',')], required=True, help='gpu ids')
parser.add_argument('--enc', type=str, required=True, help='LIT encoder setting')
parser.add_argument('--prompt', type=str, required=True, help='num of prompt')
parser.add_argument('--ckpt_used', type=str, required=True, help='best checkpoint')
parser.add_argument('--exp', type=int, required=True, help='exp')
parsed = parser.parse_args()
assert parsed.config is not None
with open(parsed.config, 'r') as f:
config = yaml.safe_load(f)
args = DotMap(config)
args.config = parsed.config
args.gpus = parsed.gpus
args.enc = parsed.enc
args.prompt = parsed.prompt
args.EVALUATION.ckpt_used = parsed.ckpt_used
args.exp = parsed.exp
if args.enc == 'res101':
args.MODEL.pretrain = '/workspace/YESCUTMIX/pretrain/RN101.pt'
return args
def main(args):
local_rank = args.local_rank
if args.TRAIN.manual_seed is not None:
cudnn.benchmark = False
cudnn.deterministic = True
torch.cuda.manual_seed(args.TRAIN.manual_seed)
np.random.seed(args.TRAIN.manual_seed)
torch.manual_seed(args.TRAIN.manual_seed)
torch.cuda.manual_seed_all(args.TRAIN.manual_seed)
random.seed(args.TRAIN.manual_seed)
model = Counter(args).cuda()
root_model = get_model_dir(args)
if args.EVALUATION.ckpt_used is not None:
filepath = os.path.join(root_model, f'{args.EVALUATION.ckpt_used}.pth')
assert os.path.isfile(filepath), filepath
print("=> loading model weight '{}'".format(filepath),flush=True)
checkpoint = torch.load(filepath)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded model weight '{}'".format(filepath),flush=True)
else:
print("=> Not loading anything",flush=True)
# test_loader = get_val_loader(args,mode='test')
ds_test = hub.load("hub://activeloop/carpk-test")
#dataloader_train = ds_train.pytorch(num_workers=args.num_workers, batch_size=1, shuffle=False)
test_loader = ds_test.pytorch(num_workers=args.DATA.workers, batch_size=1, shuffle=False)
| root_model = get_model_dir_carpk(args) | 3 | 2023-12-13 08:00:28+00:00 | 4k |
qitan/devops-backend-lite | dbapp/model/model_workflow.py | [
{
"identifier": "Environment",
"path": "dbapp/model/model_cmdb.py",
"snippet": "class Environment(TimeAbstract):\n \"\"\"环境\"\"\"\n name = models.CharField(max_length=100, unique=True, verbose_name='环境')\n alias = models.CharField(max_length=128, default='', verbose_name='环境别名')\n ticket_on = models.SmallIntegerField(default=0, choices=((0, '不启用'), (1, '启用')), verbose_name='启用工单',\n help_text=\"是否启用工单\\n(0, '不启用'), (1, '启用'), 默认: 0\")\n merge_on = models.SmallIntegerField(default=0, choices=((0, '不启用'), (1, '启用')), verbose_name='分支合并',\n help_text=\"是否要求分支合并\\n(0, '不启用'), (1, '启用'), 默认: 0\")\n template = models.JSONField(default=dict, verbose_name='应用配置',\n help_text='从数据字典接口获取,对应项的key为TEMPLATE, 数据格式为对象.\\n对应项的extra属性.\\n参数说明:\\nstrategy: 策略配置\\n - replicas: 副本, integer\\n - revisionHistoryLimit: 保留副本, integer\\n - minReadySeconds: 更新等待时间, integer\\n - maxSurge/maxUnavailable: 比例缩放 \\n\\nresources: 资源配额\\n - limits.cpu: CPU限制\\n - limits.memory: 内存限制\\n - requests.cpu: CPU请求\\n - requests.memory: 内存请求 \\n\\nenv: 环境变量, 数组[{\"name\": \"env1\", \"value\": \"value1\"}]')\n allow_ci_branch = models.JSONField(default=list, verbose_name='允许构建的分支',\n help_text=\"存储数组格式,具体的分支名; 默认['*'], 表示允许所有分支.\")\n allow_cd_branch = models.JSONField(default=list, verbose_name='允许发布的分支',\n help_text=\"存储数组格式,具体的分支名; 默认['*'], 表示允许所有分支.\")\n extra = models.JSONField(\n default=dict, verbose_name='额外参数', help_text='更多参数')\n desc = models.TextField(null=True, blank=True, verbose_name='环境描述')\n sort = models.IntegerField(default=999, verbose_name=\"排序标记\")\n\n def __str__(self):\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = True\n\n class Meta:\n db_table = 'cmdb_environment'\n ordering = ['sort']\n verbose_name = '环境'\n verbose_name_plural = verbose_name + '管理'"
},
{
"identifier": "CreateTimeAbstract",
"path": "common/extends/models.py",
"snippet": "class CreateTimeAbstract(models.Model):\n created_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name='创建时间')\n\n class ExtMeta:\n related = False\n dashboard = False\n\n class Meta:\n abstract = True"
},
{
"identifier": "CommonParent",
"path": "common/extends/models.py",
"snippet": "class CommonParent(models.Model):\n parent = models.ForeignKey(\"self\", null=True, blank=True, on_delete=models.SET_NULL, related_name='children')\n\n class Meta:\n abstract = True"
},
{
"identifier": "UserProfile",
"path": "dbapp/model/model_ucenter.py",
"snippet": "class UserProfile(TimeAbstract, AbstractUser):\n \"\"\"\n 用户信息\n \"\"\"\n mobile = models.CharField(max_length=11, null=True,\n blank=True, verbose_name=\"手机号码\")\n avatar = models.ImageField(upload_to=\"static/%Y/%m\", default=\"image/default.png\",\n max_length=250, null=True, blank=True)\n department = models.ManyToManyField(\n Organization, related_name='org_user', verbose_name='部门')\n position = models.CharField(\n max_length=50, null=True, blank=True, verbose_name=\"职能\")\n title = models.CharField(max_length=50, null=True,\n blank=True, verbose_name=\"职位\")\n leader_user_id = models.CharField(\n max_length=64, null=True, blank=True, verbose_name=\"直属领导ID\")\n roles = models.ManyToManyField(\n \"Role\", verbose_name=\"角色\", related_name='user_role', blank=True)\n dn = models.CharField(max_length=120, null=True,\n blank=True, unique=True, verbose_name=\"ldap dn\")\n is_ldap = models.BooleanField(default=False, verbose_name=\"是否ldap用户\")\n ding_userid = models.CharField(\n max_length=150, null=True, blank=True, verbose_name=\"钉钉用户ID\")\n feishu_userid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name=\"飞书UserID\")\n feishu_unionid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name='飞书UnionID')\n feishu_openid = models.CharField(\n max_length=120, null=True, blank=True, verbose_name='飞书OpenID')\n\n @property\n def name(self):\n if self.first_name:\n return self.first_name\n if self.last_name:\n return self.last_name\n return self.username\n\n def __str__(self):\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = False\n icon = 'peoples'\n\n class Meta:\n db_table = 'ucenter_userprofile'\n default_permissions = ()\n verbose_name = \"用户信息\"\n verbose_name_plural = verbose_name\n ordering = ['id']"
},
{
"identifier": "TimeAbstract",
"path": "dbapp/models.py",
"snippet": ""
}
] | from datetime import datetime
from django.db import models
from dbapp.model.model_cmdb import Environment
from common.extends.models import CreateTimeAbstract, CommonParent
from dbapp.model.model_ucenter import UserProfile
from dbapp.models import TimeAbstract
from markdown import Markdown
import shortuuid | 2,231 | """
@Author : Ken Chen
@Contact : [email protected]
@Time : 2021/11/2 上午9:50
"""
class WorkflowCategory(models.Model):
"""
工单模板分组
"""
name = models.CharField(max_length=80, unique=True, verbose_name='分类名')
desc = models.TextField(verbose_name='描述', null=True, blank=True)
sort = models.IntegerField(default=999, verbose_name='排序')
def __str__(self):
return self.name
class Meta:
db_table = 'workflow_workflowcategory'
ordering = ['sort']
class WorkflowTemplateAbstract(TimeAbstract):
"""
工单模板 抽象类
"""
category = models.ForeignKey(
WorkflowCategory, null=True, verbose_name='所属分类', on_delete=models.SET_NULL)
name = models.CharField(max_length=100, unique=True, verbose_name='工单模板名')
products = models.JSONField(
default=list, verbose_name='关联产品', help_text='存储产品ID')
projects = models.JSONField(default=list, verbose_name='关联项目',
help_text='产品项目ID数组, eg: [[product_id, project_id]]')
environment = models.ForeignKey(
Environment, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='关联环境')
enabled = models.BooleanField(default=True, verbose_name='是否启用')
nodes = models.JSONField(verbose_name='节点配置')
revision = models.IntegerField(
default=0, verbose_name='版本号') # 模板每次变更, 更新版本号加 1
comment = models.CharField(
max_length=100, null=True, blank=True, verbose_name='模板备注')
sort = models.IntegerField(default=999, verbose_name='排序')
@property
def node_list(self):
return [i['name'] for i in self.nodes]
def get_node_conf(self, node_name):
node_index = self.node_list.index(node_name)
return self.nodes[node_index]
class Meta:
abstract = True
ordering = ['sort']
def __str__(self):
return self.name
class WorkflowTemplate(WorkflowTemplateAbstract):
"""
工单模板
"""
class Meta:
db_table = 'workflow_workflowtemplate'
class WorkflowTemplateRevisionHistory(WorkflowTemplateAbstract):
"""
工单模板版本历史保存
创建工单的时候检查当前模板版本号是否在本模型中存在
如果不存在, 从 TicketTemplate 复制一份到这边。
"""
name = models.CharField(max_length=100, verbose_name='工单模板名')
class Meta:
db_table = 'workflow_workflowtemplaterevisionhistory'
class Workflow(TimeAbstract):
"""
工单
"""
class STATUS:
close = '已关闭'
revoke = '已撤回'
reject = '被驳回'
wait = '待处理'
complete = '已完成'
failed = '执行失败'
choices = (
(close, close),
(revoke, revoke),
(reject, reject),
(wait, wait),
(complete, complete),
(failed, failed)
)
wid = models.CharField(max_length=40, null=True, blank=True, unique=True, verbose_name='工单号',
help_text='前端不需要传值')
topic = models.CharField(max_length=200, verbose_name='工单标题')
node = models.CharField(max_length=50, verbose_name='当前节点名')
status = models.CharField(
max_length=30, choices=STATUS.choices, verbose_name='工单状态')
creator = models.ForeignKey(
| """
@Author : Ken Chen
@Contact : [email protected]
@Time : 2021/11/2 上午9:50
"""
class WorkflowCategory(models.Model):
"""
工单模板分组
"""
name = models.CharField(max_length=80, unique=True, verbose_name='分类名')
desc = models.TextField(verbose_name='描述', null=True, blank=True)
sort = models.IntegerField(default=999, verbose_name='排序')
def __str__(self):
return self.name
class Meta:
db_table = 'workflow_workflowcategory'
ordering = ['sort']
class WorkflowTemplateAbstract(TimeAbstract):
"""
工单模板 抽象类
"""
category = models.ForeignKey(
WorkflowCategory, null=True, verbose_name='所属分类', on_delete=models.SET_NULL)
name = models.CharField(max_length=100, unique=True, verbose_name='工单模板名')
products = models.JSONField(
default=list, verbose_name='关联产品', help_text='存储产品ID')
projects = models.JSONField(default=list, verbose_name='关联项目',
help_text='产品项目ID数组, eg: [[product_id, project_id]]')
environment = models.ForeignKey(
Environment, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='关联环境')
enabled = models.BooleanField(default=True, verbose_name='是否启用')
nodes = models.JSONField(verbose_name='节点配置')
revision = models.IntegerField(
default=0, verbose_name='版本号') # 模板每次变更, 更新版本号加 1
comment = models.CharField(
max_length=100, null=True, blank=True, verbose_name='模板备注')
sort = models.IntegerField(default=999, verbose_name='排序')
@property
def node_list(self):
return [i['name'] for i in self.nodes]
def get_node_conf(self, node_name):
node_index = self.node_list.index(node_name)
return self.nodes[node_index]
class Meta:
abstract = True
ordering = ['sort']
def __str__(self):
return self.name
class WorkflowTemplate(WorkflowTemplateAbstract):
"""
工单模板
"""
class Meta:
db_table = 'workflow_workflowtemplate'
class WorkflowTemplateRevisionHistory(WorkflowTemplateAbstract):
"""
工单模板版本历史保存
创建工单的时候检查当前模板版本号是否在本模型中存在
如果不存在, 从 TicketTemplate 复制一份到这边。
"""
name = models.CharField(max_length=100, verbose_name='工单模板名')
class Meta:
db_table = 'workflow_workflowtemplaterevisionhistory'
class Workflow(TimeAbstract):
"""
工单
"""
class STATUS:
close = '已关闭'
revoke = '已撤回'
reject = '被驳回'
wait = '待处理'
complete = '已完成'
failed = '执行失败'
choices = (
(close, close),
(revoke, revoke),
(reject, reject),
(wait, wait),
(complete, complete),
(failed, failed)
)
wid = models.CharField(max_length=40, null=True, blank=True, unique=True, verbose_name='工单号',
help_text='前端不需要传值')
topic = models.CharField(max_length=200, verbose_name='工单标题')
node = models.CharField(max_length=50, verbose_name='当前节点名')
status = models.CharField(
max_length=30, choices=STATUS.choices, verbose_name='工单状态')
creator = models.ForeignKey( | UserProfile, null=True, on_delete=models.SET_NULL, verbose_name='发起人') | 3 | 2023-12-13 03:09:32+00:00 | 4k |
timo-reymann/python-oauth2-cli-auth | oauth2_cli_auth/http_server.py | [
{
"identifier": "_method_with_timeout",
"path": "oauth2_cli_auth/_timeout.py",
"snippet": "def _method_with_timeout(your_method, timeout_seconds=5, *args, **kwargs):\n signal.signal(signal.SIGALRM, _timeout_handler)\n signal.alarm(timeout_seconds)\n\n try:\n result = your_method(*args, **kwargs)\n except TimeoutException as te:\n raise te\n finally:\n signal.alarm(0) # Reset the alarm\n\n return result"
},
{
"identifier": "TimeoutException",
"path": "oauth2_cli_auth/_timeout.py",
"snippet": "class TimeoutException(Exception):\n pass"
}
] | from http.server import BaseHTTPRequestHandler, HTTPServer
from string import Template
from typing import Optional
from urllib.parse import parse_qs, urlparse
from oauth2_cli_auth._timeout import _method_with_timeout, TimeoutException | 1,801 | opacity: 0
}
100% {
opacity: 100
}
}
.icon svg {
padding: 1rem;
}
.icon svg polyline {
-webkit-animation: checkmark 0.25s ease-in-out 0.7s backwards;
animation: checkmark 0.25s ease-in-out 0.7s backwards
}
.icon svg circle {
-webkit-animation: checkmark-circle 0.6s ease-in-out backwards;
animation: checkmark-circle 0.6s ease-in-out backwards;
}
.icon svg circle#colored {
-webkit-animation: colored-circle 0.6s ease-in-out 0.7s backwards;
animation: colored-circle 0.6s ease-in-out 0.7s backwards;
}
</style>
</head>
<body>
<div class="message">
<div class="animation-ctn">
<div class="icon">
$svg
</div>
</div>
<h1>$title</h1>
<p>$message</p>
</div>
</body>
</html>
""")
def render(self, title: str, message: str, lang: str = "en", has_error: bool = False):
return self.PAGE_TEMPLATE.substitute(
lang=lang,
title=title,
message=message,
svg=self.ERROR_SVG if has_error else self.SUCCESS_SVG,
)
class OAuthRedirectHandler(BaseHTTPRequestHandler):
callback_template = CallbackPageTemplate()
def log_message(self, format, *args):
# silence the log messages
pass
def do_GET(self):
params = parse_qs(urlparse(self.path).query)
has_error = "code" not in params or len(params['code']) != 1 or params['code'][0].strip() == ""
if has_error:
self.send_response(400)
title = "Oh snap!"
message = "Something went wrong trying to authenticate you. Please try going back in your browser, or restart the auth process."
else:
self.send_response(200)
self.server._code = params["code"][0]
title = "Success"
message = "You have been authenticated successfully. You may close this browser window now and go back to the terminal"
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(
self.callback_template
.render(
lang="en",
title=title,
message=message,
has_error=has_error
)
.encode("utf-8")
)
class OAuthCallbackHttpServer(HTTPServer):
"""
Simplistic HTTP Server to provide local callback URL for oauth2 provider
"""
def __init__(self, port):
super().__init__(("", port), OAuthRedirectHandler)
self._code = None
def get_code(self):
return self._code
@property
def callback_url(self):
return f"http://localhost:{self.server_port}"
def wait_for_code(self, attempts: int = 3, timeout_per_attempt=10) -> Optional[int]:
"""
Wait for the server to open the callback page containing the code query parameter.
It tries for #attempts with a timeout of #timeout_per_attempt for each attempt.
This prevents the CLI from getting stuck on unresolved callback URLs
:param attempts: Number of attempts
:param timeout_per_attempt: Timeout for each attempt to be successful
:return: Code from callback page or None if the callback page is not called successfully
"""
for i in range(0, attempts):
try:
_method_with_timeout(self.handle_request, timeout_seconds=timeout_per_attempt)
|
class CallbackPageTemplate:
SUCCESS_SVG = """
<svg xmlns="http://www.w3.org/2000/svg" width="154px" height="154px">
<g fill="none" stroke="#22AE73" stroke-width="2">
<circle cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<circle id="colored" fill="#22AE73" cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<polyline class="st0" stroke="#fff" stroke-width="10" points="43.5,77.8 63.7,97.9 112.2,49.4 " style="stroke-dasharray:100px, 100px; stroke-dashoffset: 200px;"/>
</g>
</svg>
"""
ERROR_SVG = """
<svg xmlns="http://www.w3.org/2000/svg" width="154px" height="154px">
<g fill="none" stroke="#F44812" stroke-width="2">
<circle cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<circle id="colored" fill="#F44812" cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<polyline class="st0" stroke="#fff" stroke-width="10" points="43.5,77.8 112.2,77.8 " style="stroke-dasharray:100px, 100px; stroke-dashoffset: 200px;"/>
</g>
</svg>
"""
PAGE_TEMPLATE = Template("""
<html lang="$lang">
<head>
<title>$title</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<meta name="charset" content="utf-8">
<style>
* {
margin: 0;
padding: 0;
}
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
}
@media (prefers-color-scheme: dark) {
body {
background: rgb(34, 39, 46);
color: rgb(173, 186, 199);
}
}
html, body {
display: flex;
align-items: center;
justify-content: center;
height: 100%;
}
h1 {
font-size: 4rem;
}
p {
font-size: 1.4rem;
max-width: 70ch;
}
.message {
text-align: center;
}
.animation-ctn {
text-align: center;
}
@keyframes checkmark {
0% {
stroke-dashoffset: 100px
}
100% {
stroke-dashoffset: 0px
}
}
@keyframes checkmark-circle {
0% {
stroke-dashoffset: 480px
}
100% {
stroke-dashoffset: 960px
}
}
@keyframes colored-circle {
0% {
opacity: 0
}
100% {
opacity: 100
}
}
.icon svg {
padding: 1rem;
}
.icon svg polyline {
-webkit-animation: checkmark 0.25s ease-in-out 0.7s backwards;
animation: checkmark 0.25s ease-in-out 0.7s backwards
}
.icon svg circle {
-webkit-animation: checkmark-circle 0.6s ease-in-out backwards;
animation: checkmark-circle 0.6s ease-in-out backwards;
}
.icon svg circle#colored {
-webkit-animation: colored-circle 0.6s ease-in-out 0.7s backwards;
animation: colored-circle 0.6s ease-in-out 0.7s backwards;
}
</style>
</head>
<body>
<div class="message">
<div class="animation-ctn">
<div class="icon">
$svg
</div>
</div>
<h1>$title</h1>
<p>$message</p>
</div>
</body>
</html>
""")
def render(self, title: str, message: str, lang: str = "en", has_error: bool = False):
return self.PAGE_TEMPLATE.substitute(
lang=lang,
title=title,
message=message,
svg=self.ERROR_SVG if has_error else self.SUCCESS_SVG,
)
class OAuthRedirectHandler(BaseHTTPRequestHandler):
callback_template = CallbackPageTemplate()
def log_message(self, format, *args):
# silence the log messages
pass
def do_GET(self):
params = parse_qs(urlparse(self.path).query)
has_error = "code" not in params or len(params['code']) != 1 or params['code'][0].strip() == ""
if has_error:
self.send_response(400)
title = "Oh snap!"
message = "Something went wrong trying to authenticate you. Please try going back in your browser, or restart the auth process."
else:
self.send_response(200)
self.server._code = params["code"][0]
title = "Success"
message = "You have been authenticated successfully. You may close this browser window now and go back to the terminal"
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(
self.callback_template
.render(
lang="en",
title=title,
message=message,
has_error=has_error
)
.encode("utf-8")
)
class OAuthCallbackHttpServer(HTTPServer):
"""
Simplistic HTTP Server to provide local callback URL for oauth2 provider
"""
def __init__(self, port):
super().__init__(("", port), OAuthRedirectHandler)
self._code = None
def get_code(self):
return self._code
@property
def callback_url(self):
return f"http://localhost:{self.server_port}"
def wait_for_code(self, attempts: int = 3, timeout_per_attempt=10) -> Optional[int]:
"""
Wait for the server to open the callback page containing the code query parameter.
It tries for #attempts with a timeout of #timeout_per_attempt for each attempt.
This prevents the CLI from getting stuck on unresolved callback URLs
:param attempts: Number of attempts
:param timeout_per_attempt: Timeout for each attempt to be successful
:return: Code from callback page or None if the callback page is not called successfully
"""
for i in range(0, attempts):
try:
_method_with_timeout(self.handle_request, timeout_seconds=timeout_per_attempt) | except TimeoutException: | 1 | 2023-12-09 12:14:33+00:00 | 4k |
Chris10M/Ev2Hands | src/Ev2Hands/model/TEHNet.py | [
{
"identifier": "PointNetSetAbstractionMsg",
"path": "src/Ev2Hands/model/pointnet2_utils.py",
"snippet": "class PointNetSetAbstractionMsg(nn.Module):\n def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):\n super(PointNetSetAbstractionMsg, self).__init__()\n self.npoint = npoint\n self.radius_list = radius_list\n self.nsample_list = nsample_list\n self.conv_blocks = nn.ModuleList()\n self.bn_blocks = nn.ModuleList()\n for i in range(len(mlp_list)):\n convs = nn.ModuleList()\n bns = nn.ModuleList()\n last_channel = in_channel + 3\n for out_channel in mlp_list[i]:\n convs.append(nn.Conv2d(last_channel, out_channel, 1))\n bns.append(nn.BatchNorm2d(out_channel))\n last_channel = out_channel\n self.conv_blocks.append(convs)\n self.bn_blocks.append(bns)\n\n def forward(self, xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, C, N]\n points: input points data, [B, D, N]\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n xyz = xyz.permute(0, 2, 1).contiguous()\n if points is not None:\n points = points.permute(0, 2, 1).contiguous()\n\n B, N, C = xyz.shape\n S = self.npoint\n new_xyz = index_points(xyz, farthest_point_sample(xyz, S))\n new_points_list = []\n for i, radius in enumerate(self.radius_list):\n K = self.nsample_list[i]\n group_idx = query_ball_point(radius, K, xyz, new_xyz)\n grouped_xyz = index_points(xyz, group_idx)\n grouped_xyz -= new_xyz.view(B, S, 1, C)\n if points is not None:\n grouped_points = index_points(points, group_idx)\n grouped_points = torch.cat([grouped_points, grouped_xyz], dim=-1)\n else:\n grouped_points = grouped_xyz\n\n grouped_points = grouped_points.permute(0, 3, 2, 1).contiguous() # [B, D, K, S]\n for j in range(len(self.conv_blocks[i])):\n conv = self.conv_blocks[i][j]\n bn = self.bn_blocks[i][j]\n grouped_points = F.relu(bn(conv(grouped_points)))\n new_points = torch.max(grouped_points, 2)[0] # [B, D', S]\n new_points_list.append(new_points)\n\n new_xyz = new_xyz.permute(0, 2, 1).contiguous()\n new_points_concat = torch.cat(new_points_list, dim=1)\n return new_xyz, new_points_concat"
},
{
"identifier": "PointNetSetAbstraction",
"path": "src/Ev2Hands/model/pointnet2_utils.py",
"snippet": "class PointNetSetAbstraction(nn.Module):\n def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):\n super(PointNetSetAbstraction, self).__init__()\n self.npoint = npoint\n self.radius = radius\n self.nsample = nsample\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm2d(out_channel))\n last_channel = out_channel\n self.group_all = group_all\n\n def forward(self, xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, C, N]\n points: input points data, [B, D, N]\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n xyz = xyz.permute(0, 2, 1).contiguous()\n if points is not None:\n points = points.permute(0, 2, 1).contiguous()\n\n if self.group_all:\n new_xyz, new_points = sample_and_group_all(xyz, points)\n else:\n new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points)\n # new_xyz: sampled points position data, [B, npoint, C]\n # new_points: sampled points data, [B, npoint, nsample, C+D]\n new_points = new_points.permute(0, 3, 2, 1).contiguous() # [B, C+D, nsample,npoint]\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n\n new_points = torch.max(new_points, 2)[0]\n new_xyz = new_xyz.permute(0, 2, 1).contiguous()\n return new_xyz, new_points"
},
{
"identifier": "PointNetFeaturePropagation",
"path": "src/Ev2Hands/model/pointnet2_utils.py",
"snippet": "class PointNetFeaturePropagation(nn.Module):\n def __init__(self, in_channel, mlp):\n super(PointNetFeaturePropagation, self).__init__()\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm1d(out_channel))\n last_channel = out_channel\n\n def forward(self, xyz1, xyz2, points1, points2):\n \"\"\"\n Input:\n xyz1: input points position data, [B, C, N]\n xyz2: sampled input points position data, [B, C, S]\n points1: input points data, [B, D, N]\n points2: input points data, [B, D, S]\n Return:\n new_points: upsampled points data, [B, D', N]\n \"\"\"\n xyz1 = xyz1.permute(0, 2, 1).contiguous()\n xyz2 = xyz2.permute(0, 2, 1).contiguous()\n\n points2 = points2.permute(0, 2, 1).contiguous()\n B, N, C = xyz1.shape\n _, S, _ = xyz2.shape\n\n if S == 1:\n interpolated_points = points2.repeat(1, N, 1)\n else:\n dists = square_distance(xyz1, xyz2)\n dists, idx = dists.sort(dim=-1)\n dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]\n\n dist_recip = 1.0 / (dists + 1e-8)\n norm = torch.sum(dist_recip, dim=2, keepdim=True)\n weight = dist_recip / norm\n interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)\n\n if points1 is not None:\n points1 = points1.permute(0, 2, 1).contiguous()\n new_points = torch.cat([points1, interpolated_points], dim=-1)\n else:\n new_points = interpolated_points\n\n new_points = new_points.permute(0, 2, 1).contiguous()\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n return new_points"
}
] | import numpy as np
import torch.nn as nn
import torch
import os
import torch.nn.functional as F
from .pointnet2_utils import PointNetSetAbstractionMsg, PointNetSetAbstraction, PointNetFeaturePropagation | 3,103 | def __init__(self):
super(AttentionBlock, self).__init__()
def forward(self, key, value, query):
query = query.permute(0, 2, 1)
N, KC = key.shape[:2]
key = key.view(N, KC, -1)
N, KC = value.shape[:2]
value = value.view(N, KC, -1)
sim_map = torch.bmm(key, query)
sim_map = (KC ** -.5 ) * sim_map
sim_map = F.softmax(sim_map, dim=1)
context = torch.bmm(sim_map, value)
return context
class MANORegressor(nn.Module):
def __init__(self, n_inp_features=4, n_pose_params=6, n_shape_params=10):
super(MANORegressor, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = n_inp_features
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], additional_channel, [[128, 128, 256], [128, 196, 256]])
self.sa2 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512], group_all=True)
self.n_pose_params = n_pose_params
self.n_mano_params = n_pose_params + n_shape_params
self.mano_regressor = nn.Sequential(
nn.Linear(512, 1024),
nn.ReLU(),
nn.BatchNorm1d(1024),
nn.Dropout(0.3),
nn.Linear(1024, 3 + self.n_mano_params + 3),
)
def J3dtoJ2d(self, j3d, scale):
B, N = j3d.shape[:2]
device = j3d.device
j2d = torch.zeros(B, N, 2, device=device)
j2d[:, :, 0] = scale[:, :, 0] * j3d[:, :, 0]
j2d[:, :, 1] = scale[:, :, 1] * j3d[:, :, 1]
return j2d
def forward(self, xyz, features, mano_hand, previous_mano_params=None):
device = xyz.device
batch_size = xyz.shape[0]
l0_xyz = xyz
l0_points = features
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l2_xyz = l2_xyz.squeeze(-1)
l2_points = l2_points.squeeze(-1)
if previous_mano_params is None:
previous_mano_params = torch.zeros(self.n_mano_params).unsqueeze(0).expand(batch_size, -1).to(device)
previous_rot_trans_params = torch.zeros(6).unsqueeze(0).expand(batch_size, -1).to(device)
mano_params = self.mano_regressor(l2_points)
global_orient = mano_params[:, :3]
hand_pose = mano_params[:, 3:3+self.n_pose_params]
betas = mano_params[:, 3+self.n_pose_params:-3]
transl = mano_params[:, -3:]
device = mano_hand.shapedirs.device
mano_args = {
'global_orient': global_orient.to(device),
'hand_pose' : hand_pose.to(device),
'betas' : betas.to(device),
'transl' : transl.to(device),
}
mano_outs = dict()
output = mano_hand(**mano_args)
mano_outs['vertices'] = output.vertices
mano_outs['j3d'] = output.joints
mano_outs.update(mano_args)
if not self.training:
mano_outs['faces'] = np.tile(mano_hand.faces, (batch_size, 1, 1))
return mano_outs
class TEHNet(nn.Module):
def __init__(self, n_pose_params, num_classes=4):
super(TEHNet, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = 1 + int(os.getenv('ERPC', 0))
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
|
class AttentionBlock(nn.Module):
def __init__(self):
super(AttentionBlock, self).__init__()
def forward(self, key, value, query):
query = query.permute(0, 2, 1)
N, KC = key.shape[:2]
key = key.view(N, KC, -1)
N, KC = value.shape[:2]
value = value.view(N, KC, -1)
sim_map = torch.bmm(key, query)
sim_map = (KC ** -.5 ) * sim_map
sim_map = F.softmax(sim_map, dim=1)
context = torch.bmm(sim_map, value)
return context
class MANORegressor(nn.Module):
def __init__(self, n_inp_features=4, n_pose_params=6, n_shape_params=10):
super(MANORegressor, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = n_inp_features
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], additional_channel, [[128, 128, 256], [128, 196, 256]])
self.sa2 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512], group_all=True)
self.n_pose_params = n_pose_params
self.n_mano_params = n_pose_params + n_shape_params
self.mano_regressor = nn.Sequential(
nn.Linear(512, 1024),
nn.ReLU(),
nn.BatchNorm1d(1024),
nn.Dropout(0.3),
nn.Linear(1024, 3 + self.n_mano_params + 3),
)
def J3dtoJ2d(self, j3d, scale):
B, N = j3d.shape[:2]
device = j3d.device
j2d = torch.zeros(B, N, 2, device=device)
j2d[:, :, 0] = scale[:, :, 0] * j3d[:, :, 0]
j2d[:, :, 1] = scale[:, :, 1] * j3d[:, :, 1]
return j2d
def forward(self, xyz, features, mano_hand, previous_mano_params=None):
device = xyz.device
batch_size = xyz.shape[0]
l0_xyz = xyz
l0_points = features
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l2_xyz = l2_xyz.squeeze(-1)
l2_points = l2_points.squeeze(-1)
if previous_mano_params is None:
previous_mano_params = torch.zeros(self.n_mano_params).unsqueeze(0).expand(batch_size, -1).to(device)
previous_rot_trans_params = torch.zeros(6).unsqueeze(0).expand(batch_size, -1).to(device)
mano_params = self.mano_regressor(l2_points)
global_orient = mano_params[:, :3]
hand_pose = mano_params[:, 3:3+self.n_pose_params]
betas = mano_params[:, 3+self.n_pose_params:-3]
transl = mano_params[:, -3:]
device = mano_hand.shapedirs.device
mano_args = {
'global_orient': global_orient.to(device),
'hand_pose' : hand_pose.to(device),
'betas' : betas.to(device),
'transl' : transl.to(device),
}
mano_outs = dict()
output = mano_hand(**mano_args)
mano_outs['vertices'] = output.vertices
mano_outs['j3d'] = output.joints
mano_outs.update(mano_args)
if not self.training:
mano_outs['faces'] = np.tile(mano_hand.faces, (batch_size, 1, 1))
return mano_outs
class TEHNet(nn.Module):
def __init__(self, n_pose_params, num_classes=4):
super(TEHNet, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = 1 + int(os.getenv('ERPC', 0))
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True) | self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256]) | 2 | 2023-12-13 08:18:53+00:00 | 4k |
solanav/phishflood | phishings/views.py | [
{
"identifier": "rabbit_conf",
"path": "config/rabbit_conf.py",
"snippet": "HOST = \"rabbitmq\"\nQUEUE = \"phishings_queue\"\nEXCHANGE = \"phishings\"\nROUTINGKEY = \"info\""
},
{
"identifier": "Phishing",
"path": "phishings/models.py",
"snippet": "class Phishing(models.Model):\n id = models.CharField(\n max_length=255, primary_key=True, default=None, editable=False\n )\n url = models.URLField(max_length=512)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def save(self, *args, **kwargs):\n self.id = hashlib.sha256(self.url.encode(\"utf-8\")).hexdigest()\n super(Phishing, self).save(*args, **kwargs)\n \n def __str__(self) -> str:\n return f\"<URL: {self.url}, ID: {self.id}>\""
},
{
"identifier": "Form",
"path": "phishings/models.py",
"snippet": "class Form(models.Model):\n id = models.CharField(\n max_length=255, primary_key=True, default=None, editable=False\n )\n phishing = models.ForeignKey(\n Phishing, on_delete=models.CASCADE, related_name=\"forms\"\n )\n meta_id = models.IntegerField()\n\n html_id = models.CharField(max_length=255, null=True)\n html_action = models.CharField(max_length=255, null=True)\n html_method = models.CharField(max_length=255, null=True)\n html_type = models.CharField(max_length=255, null=True)\n page = models.IntegerField(blank=False, null=False)\n\n def save(self, *args, **kwargs):\n self.id = f\"{self.phishing.id}-{self.page}-{self.meta_id}\"\n super(Form, self).save(*args, **kwargs)\n \n def __str__(self) -> str:\n return f\"<URL: {self.phishing.url}, PAGE: {self.page}, ID: {self.meta_id}>\""
},
{
"identifier": "Input",
"path": "phishings/models.py",
"snippet": "class Input(models.Model):\n id = models.CharField(\n max_length=255, primary_key=True, default=None, editable=False\n )\n\n form = models.ForeignKey(Form, on_delete=models.CASCADE, related_name=\"inputs\")\n\n meta_id = models.IntegerField()\n\n html_id = models.CharField(max_length=255, null=True)\n html_name = models.CharField(max_length=255, null=True)\n html_placeholder = models.CharField(max_length=255, null=True)\n html_type = models.CharField(max_length=255, null=True)\n\n def save(self, *args, **kwargs):\n self.id = f\"{self.form.id}-{self.meta_id}\"\n super(Input, self).save(*args, **kwargs)\n \n def __str__(self) -> str:\n return f\"<URL: {self.form.phishing.url}, PAGE: {self.form.page}, FORM: {self.form.meta_id}, ID: {self.meta_id}>\""
},
{
"identifier": "Action",
"path": "phishings/models.py",
"snippet": "class Action(models.Model):\n phishing = models.ForeignKey(\n Phishing, on_delete=models.CASCADE, related_name=\"actions\"\n )\n action = models.CharField(max_length=255)\n form = models.ForeignKey(Form, on_delete=models.CASCADE)\n input = models.ForeignKey(Input, on_delete=models.CASCADE)\n value = models.CharField(max_length=255, blank=True, null=True)\n status = models.CharField(max_length=255)\n created_at = models.DateTimeField(auto_now_add=True)\n \n def __str__(self) -> str:\n return f\"<URL: {self.phishing.url}, PAGE: {self.form.page}, FORM: {self.form.meta_id}, INPUT: {self.input.meta_id}, ACTION: {self.action}, STATUS: {self.status}>\""
},
{
"identifier": "GroupSerializer",
"path": "phishings/serializers.py",
"snippet": "class GroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = Group\n fields = [\"url\", \"name\"]"
},
{
"identifier": "UserSerializer",
"path": "phishings/serializers.py",
"snippet": "class UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\"url\", \"username\", \"email\", \"groups\"]"
},
{
"identifier": "PhishingSerializer",
"path": "phishings/serializers.py",
"snippet": "class PhishingSerializer(serializers.ModelSerializer):\n class Meta:\n model = Phishing\n fields = [\"id\", \"url\"]"
},
{
"identifier": "FormSerializer",
"path": "phishings/serializers.py",
"snippet": "class FormSerializer(serializers.ModelSerializer):\n class Meta:\n model = Form\n fields = [\n \"phishing\",\n \"page\",\n \"meta_id\",\n \"html_id\",\n \"html_action\",\n \"html_method\",\n \"html_type\",\n ]"
},
{
"identifier": "InputSerializer",
"path": "phishings/serializers.py",
"snippet": "class InputSerializer(serializers.ModelSerializer):\n class Meta:\n model = Input\n fields = [\n \"form\",\n \"meta_id\",\n \"html_id\",\n \"html_name\",\n \"html_placeholder\",\n \"html_type\",\n ]"
},
{
"identifier": "ActionSerializer",
"path": "phishings/serializers.py",
"snippet": "class ActionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Action\n fields = [\"phishing\", \"action\", \"form\", \"input\", \"value\", \"status\"]"
}
] | import json
import pika
from config import rabbit_conf
from typing import Dict
from phishings.models import Phishing, Form, Input, Action
from rest_framework import viewsets
from django.contrib.auth.models import User, Group
from rest_framework import permissions
from rest_framework.response import Response
from pika.exchange_type import ExchangeType
from phishings.serializers import (
GroupSerializer,
UserSerializer,
PhishingSerializer,
FormSerializer,
InputSerializer,
ActionSerializer,
) | 1,617 |
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by("-date_joined")
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
class FullPhishingViewSet(viewsets.ModelViewSet):
"""
API endpoint that returns all phishings as a single object with nested forms, inputs, and actions.
"""
queryset = Phishing.objects.all()
serializer_class = PhishingSerializer
permission_classes = [permissions.IsAuthenticated]
def fullphishing(self, request, id: str) -> Dict:
phishing = Phishing.objects.get(id=id)
json_phishing = self.get_serializer(phishing).data
json_phishing["forms"] = []
json_phishing["actions"] = []
forms = Form.objects.filter(phishing=phishing)
for form in forms:
json_form = FormSerializer(form, context={"request": request}).data
assert isinstance(json_form, Dict)
json_form["inputs"] = []
inputs = Input.objects.filter(form=form)
for input_obj in inputs:
json_input = InputSerializer(
input_obj, context={"request": request}
).data
json_form["inputs"].append(json_input)
json_phishing["forms"].append(json_form)
actions = Action.objects.filter(phishing=phishing)
for action in actions:
|
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by("-date_joined")
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
class FullPhishingViewSet(viewsets.ModelViewSet):
"""
API endpoint that returns all phishings as a single object with nested forms, inputs, and actions.
"""
queryset = Phishing.objects.all()
serializer_class = PhishingSerializer
permission_classes = [permissions.IsAuthenticated]
def fullphishing(self, request, id: str) -> Dict:
phishing = Phishing.objects.get(id=id)
json_phishing = self.get_serializer(phishing).data
json_phishing["forms"] = []
json_phishing["actions"] = []
forms = Form.objects.filter(phishing=phishing)
for form in forms:
json_form = FormSerializer(form, context={"request": request}).data
assert isinstance(json_form, Dict)
json_form["inputs"] = []
inputs = Input.objects.filter(form=form)
for input_obj in inputs:
json_input = InputSerializer(
input_obj, context={"request": request}
).data
json_form["inputs"].append(json_input)
json_phishing["forms"].append(json_form)
actions = Action.objects.filter(phishing=phishing)
for action in actions: | json_action = ActionSerializer(action, context={"request": request}).data | 10 | 2023-12-11 16:38:36+00:00 | 4k |
aatmunbaxi/orgroamtools | orgroamtools/data.py | [
{
"identifier": "IdentifierType",
"path": "orgroamtools/_utils.py",
"snippet": "class IdentifierType(Enum):\n \"\"\"\n Nodes in an org-roam graph can identified uniquely by their ID, and non-uniquely\n by their title. This enum disambiguates the the type of an identifier\n for functions that take a generic identifier in as an input.\n e.g. ``RoamGraph.node(identifier)``\n\n Attributes\n ----------\n TITLE : 1\n Indicates identifier is a title\n ID : 2\n Indicates identifier is an ID\n NOTHING : 0\n Indicates identifier is neither a title nor an ID\n \"\"\"\n\n TITLE = 1\n ID = 2\n NOTHING = 0"
},
{
"identifier": "DuplicateTitlesWarning",
"path": "orgroamtools/_utils.py",
"snippet": "class DuplicateTitlesWarning(Warning):\n \"\"\"\n Warns there are multiple nodes with the same title in the graph.\n\n In the case there are multiple nodes with the same title, identifying\n nodes by their title will not be a unique way of picking them out.\n The resulting behavior may not be what the user wants.\n\n Attributes\n ----------\n message : str\n Human readable string describing warning\n \"\"\"\n\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return repr(self.message)"
},
{
"identifier": "extract_math_snippets",
"path": "orgroamtools/_utils.py",
"snippet": "def extract_math_snippets(text: str) -> list[str]:\n \"\"\"Return math snippets in text\n\n Parameters\n ----------\n text : ``str``\n Text to extract math snippets from\n\n Returns\n -------\n ``list[str]``\n List of math snippets\n \"\"\"\n return [\n group\n for match in re.findall(ORG_LATEX_RX, text, re.DOTALL)\n for group in match\n if group\n ]"
},
{
"identifier": "extract_src_blocks",
"path": "orgroamtools/_utils.py",
"snippet": "def extract_src_blocks(text: str) -> list[Tuple[str, str]]:\n \"\"\"Return org source blocks\n\n Parameters\n ----------\n text : ``str``\n Text to extract source blocks from\n\n Returns\n -------\n ``list[Tule[str,str]]``\n List of source block environments in the form (LANGUAGE, SRC_BLOCK_BODY)\n \"\"\"\n return [(match[0], match[1].strip()) for match in SRC_BLOCK_RE.findall(text)]"
}
] | import os
import warnings
import sqlite3 as sql
import copy
import networkx as nx
import orgparse as op
from typing import Iterable, Tuple, Optional
from dataclasses import dataclass
from orgroamtools._utils import (
IdentifierType,
DuplicateTitlesWarning,
extract_math_snippets,
extract_src_blocks,
) | 2,593 | except sql.Error as e:
print("Connection failed: ", e)
return []
def __init_misc_links(self, dbpath: str) -> list[list[OrgLink]]:
"""Initialize list of miscellaneous org-mode links
Parameters
----------
dbpath : ``str``
path to org-roam database
Returns
-------
``list[OrgLink]``
List of OrgRoam links that are not other nodes (files, images,
internet links, etc)
Examples
--------
FIXME: Add docs.
"""
q = """SELECT n.id, GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.dest END),
GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.type END)
FROM
nodes n
LEFT JOIN
links l ON n.id = l.source
GROUP BY
n.id
ORDER BY
n.id;"""
try:
with sql.connect(dbpath, uri=True) as con:
csr = con.cursor()
clean = lambda s: s.replace('"', "")
quer = csr.execute(q)
output = quer.fetchall()
links_and_types = [
list(
zip(
tuple(clean(row[1]).split(",")),
tuple(clean(row[2]).split(",")),
)
)
if row[1]
else []
for row in output
]
return [
[OrgLink(prop[1], prop[0], None) if prop else [] for prop in lst]
for lst in links_and_types
]
except sql.Error as e:
print("Connection failed: ", e)
return []
def remove_orphans(self) -> RoamGraph:
"""Remove orphans from network
This method returns a new network that has orphans removed.
Returns
-------
RoamGraph
Connected subcollection of self
Examples
--------
FIXME: Add docs.
"""
indices_of_orphans = [
i for i in range(len(self.IDs)) if self.nodes[i] in self._orphans
]
new_node_data = [
data
for idx, data in enumerate(
zip(
self.IDs,
self.titles,
self.fnames,
self._tags,
self._links_to,
self.misc_link_index.values(),
)
)
if idx not in indices_of_orphans
]
new_node_index = {
j[2]: RoamNode(j[0], j[1], j[2], j[3], j[4], j[5]) for j in new_node_data
}
self._node_index = new_node_index
self.refresh()
return self
def __is_orphan(self, node: RoamNode) -> bool:
"""Check if node is an orphan
Parameters
----------
node : ``RoamNode``
Node to check
Returns
-------
``bool``
True if node is an orphan
Examples
--------
FIXME: Add docs.
"""
pointed_to = True if any(node.id in n.backlinks for n in self.nodes) else False
points_to = node.backlinks != []
return not points_to and not pointed_to
| from __future__ import annotations
@dataclass
class RoamNode:
"""Store relevant org-roam node information
A node is an atomic note known to the org-roam database.
It is uniquely determined by an ID generated at the time of creation, but
has other identifiers and information that a user might want to know about.
Attributes
----------
id : ``str``
Unique org ID of org-roam node
title : ``str``
Title of org-roam node
fname : ``str``
Filename of org-roam node
tags : ``set[str]``
Collection of tags of org-roam node
backlinks : ``list[str]``
List of backlinks in org-roam node
misc_links : ``list[OrgLink]``
List of miscellaneous links that are not links to other nodes
"""
id: str
title: str
fname: str
tags: set[str]
backlinks: list[str]
misc_links: list[OrgLink]
@property
def body(self) -> str:
"""Return body of node
Returns
-------
``str``
Body text of node
"""
root = op.load(self.fname)
node_heading = None
for node in root:
if node.get_property("ID") == self.id:
node_heading = node
break
return "\n".join(subtree.get_body() for subtree in node_heading)
class RoamGraph:
"""Store information of ``org-roam`` graph.
By default, the nodes in the _node_index are ordered ascending on
the node IDs. In the documentation, the words "collection", "network", and
"graph" all mean the same thing: the graph whose nodes are the ``org-roam`` nodes
and whose edges are determined by backlinks in the ``org-roam`` collection.
The location of the ``org-roam`` database is the value of ``(org-roam-db-location)``::
from orgroamtools.data import RoamGraph
collection = RoamGraph(LOCATION_OF_DB)
Attributes
----------
db_path : ``str``
Path to org-roam database connected to graph
_id_title_map : ``dict[str,str]``
Map with keys the id of nodes and values the titles of the corresponding nodes
_graph : ``nx.MultiDiGraph``
``networkx`` graph representation of the collection
_node_index : ``dict[str, RoamNode]``
Map with keys the ID of nodes and values the ``RoamNode`` object that corresponds
_orphans : ``list[RoamNode]``
List of orphans in network. An orphan node is one with no links connecting it to any
other node
_is_connected : ``bool``
Tracks if network is connected (i.e. has no orphans)
_duplicate_titles : ``list[str]``
List of duplicated titles in network, used for warning user
_contains_dup_titles : ``bool``
Whether the collection has duplicated titles
"""
@classmethod
def init_empty(self):
"""Initialize empty RoamNode object
Returns
-------
RoamNode object with default fields initialized
"""
self.db_path = None
self._duplicate_titles = []
self._contains_dup_titles = None
self._id_title_map = dict()
self._graph = None
self._node_index = dict()
self._misc_link_index = dict()
self._orphans = []
self._is_connected = None
return self
def __init__(self, db: str):
"""Initializes RoamGraph object
The RoamGraph object stores information about the nodes in the
collection described by the database path provided. The nodes also store
information about how they relate to each other via backlinks.
Parameters
----------
db : ``str``
Path to org-roam database
Examples
--------
>>> collection = RoamGraph(PATH_TO_ORGROAM_DB)
"""
super(RoamGraph, self).__init__()
self.db_path = os.path.expanduser(db)
if not os.path.isfile(self.db_path):
raise AttributeError(f"No such file or directory: {self.db_path}")
_fnames = self.__init_fnames(self.db_path)
_titles = self.__init_titles(self.db_path)
_ids = self.__init_ids(self.db_path)
links = self.__init_links_to(db)
_links_to = [[ID for ID in link_list if ID in _ids] for link_list in links]
_tags = self.__init_tags(self.db_path)
_misc_links = self.__init_misc_links(self.db_path)
self._node_index = {
j[2]: RoamNode(j[0], j[1], j[2], j[3], j[4], j[5])
for j in zip(_ids, _titles, _fnames, _tags, _links_to, _misc_links)
}
seen = set()
self._duplicate_titles = [x for x in self.titles if x in seen or seen.add(x)]
self._contains_dup_titles = len(self._duplicate_titles) > 0
if self._contains_dup_titles:
warnings.warn(
"Collection contains duplicate titles. Matching nodes by title will be non-exhaustive.",
DuplicateTitlesWarning,
)
# In rare cases we'll pick up links to nonexistent nodes
self._misc_link_index = {_ids[i]: _misc_links[i] for i in range(len(_ids))}
self._id_title_map = {_ids[i]: self.titles[i] for i in range(len(_ids))}
self._graph = nx.MultiDiGraph({_ids[i]: _links_to[i] for i in range(len(_ids))})
self._orphans = [
node
for node in self._node_index.values()
if not any(
[
self._nodes_linked(node, other, directed=False)
for other in self._node_index.values()
if other != node
]
)
]
self._is_connected = self._orphans == []
def __filter_tags(self, tags: list[str], exclude: bool) -> list[RoamNode]:
"""Filter network by tags
Parameters
----------
tags : ``list[str]``
List of tags to filter by
exclude : ``bool``
Whether to exclude the tags in the new network or not
"""
tfilter = [self._node_has_tag(node, tag) for node in self.nodes for tag in tags]
if exclude:
tfilter = [not b for b in tfilter]
return [node for (node, b) in zip(self.nodes, tfilter) if b]
def __init_ids(self, dbpath: str) -> list[str]:
"""Initialize list of IDs for each node
Parameters
----------
dbpath : ``str``
Path of org-roam database
Returns
-------
List of node IDs
"""
id_query = "SELECT id FROM nodes ORDER BY id ASC;"
try:
with sql.connect(dbpath, uri=True) as con:
csr = con.cursor()
query = csr.execute(id_query)
return [i[0].replace('"', "") for i in query.fetchall()]
except sql.Error as e:
print("Connection failed: ", e)
return []
def __init_fnames(self, dbpath: str) -> list[str]:
"""
Initializes list of filenames for each node
Parameters
----------
dbpath : ``str``
Path to org-roam database
Returns
-------
List of node filepaths
"""
fname_query = "SELECT file FROM nodes ORDER BY id ASC;"
try:
with sql.connect(dbpath, uri=True) as con:
csr = con.cursor()
query = csr.execute(fname_query)
return [i[0].replace('"', "") for i in query.fetchall()]
except sql.Error as e:
print("Connection failed: ", e)
return []
def __init_titles(self, dbpath: str) -> list[str]:
"""
Initialize list of titles for each node
Parameters
----------
dbpath : ``str``
Path to org-roam database
Returns
-------
List of node titles
"""
title_query = "SELECT title FROM nodes ORDER BY id ASC;"
try:
with sql.connect(dbpath, uri=True) as con:
csr = con.cursor()
query = csr.execute(title_query)
return [i[0].replace('"', "") for i in query.fetchall()]
except sql.Error as e:
print("Connection failed: ", e)
return []
def __init_tags(self, dbpath: str) -> list[set[str]]:
"""
Initialize list of tags for each node
Parameters
----------
dbpath : ``str``
Path to org-roam database
Returns
-------
List of node tags (as sets)
"""
tags_query = "SELECT nodes.id, GROUP_CONCAT(tags.tag) AS tags FROM nodes LEFT JOIN tags ON nodes.id = tags.node_id GROUP BY nodes.id ORDER BY nodes.id ASC;"
try:
with sql.connect(dbpath, uri=True) as con:
csr = con.cursor()
query = csr.execute(tags_query)
clean = lambda s: s.replace('"', "")
match_null = lambda s: set() if not s else s.split(",")
return [set(map(clean, match_null(i[1]))) for i in query.fetchall()]
except sql.Error as e:
print("Connection failed: ", e)
return []
def __init_links_to(self, dbpath: str) -> list[list[str]]:
"""Initialize list of links
Parameters
----------
dbpath : ``str``
Path to org-roam database
Returns
-------
List of backlinks in node (as a list)
"""
links_to_query = """
SELECT n.id,
GROUP_CONCAT(CASE WHEN l.type = '"id"' THEN l.dest END)
FROM nodes n
LEFT JOIN links l ON n.id = l.source
GROUP BY n.id
ORDER BY n.id ;
"""
try:
with sql.connect(dbpath, uri=True) as con:
csr = con.cursor()
query = csr.execute(links_to_query)
clean = lambda s: s.replace('"', "")
links = query.fetchall()
# Separated links by comma might still have links we dont want (e.g. files, etc)
self_and_links = [
[clean(i[0])] + list(map(clean, i[1].split(",")))
if i[1]
else [clean(i[0])]
for i in links
]
return self_and_links
except sql.Error as e:
print("Connection failed: ", e)
return []
def __init_misc_links(self, dbpath: str) -> list[list[OrgLink]]:
"""Initialize list of miscellaneous org-mode links
Parameters
----------
dbpath : ``str``
path to org-roam database
Returns
-------
``list[OrgLink]``
List of OrgRoam links that are not other nodes (files, images,
internet links, etc)
Examples
--------
FIXME: Add docs.
"""
q = """SELECT n.id, GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.dest END),
GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.type END)
FROM
nodes n
LEFT JOIN
links l ON n.id = l.source
GROUP BY
n.id
ORDER BY
n.id;"""
try:
with sql.connect(dbpath, uri=True) as con:
csr = con.cursor()
clean = lambda s: s.replace('"', "")
quer = csr.execute(q)
output = quer.fetchall()
links_and_types = [
list(
zip(
tuple(clean(row[1]).split(",")),
tuple(clean(row[2]).split(",")),
)
)
if row[1]
else []
for row in output
]
return [
[OrgLink(prop[1], prop[0], None) if prop else [] for prop in lst]
for lst in links_and_types
]
except sql.Error as e:
print("Connection failed: ", e)
return []
def remove_orphans(self) -> RoamGraph:
"""Remove orphans from network
This method returns a new network that has orphans removed.
Returns
-------
RoamGraph
Connected subcollection of self
Examples
--------
FIXME: Add docs.
"""
indices_of_orphans = [
i for i in range(len(self.IDs)) if self.nodes[i] in self._orphans
]
new_node_data = [
data
for idx, data in enumerate(
zip(
self.IDs,
self.titles,
self.fnames,
self._tags,
self._links_to,
self.misc_link_index.values(),
)
)
if idx not in indices_of_orphans
]
new_node_index = {
j[2]: RoamNode(j[0], j[1], j[2], j[3], j[4], j[5]) for j in new_node_data
}
self._node_index = new_node_index
self.refresh()
return self
def __is_orphan(self, node: RoamNode) -> bool:
"""Check if node is an orphan
Parameters
----------
node : ``RoamNode``
Node to check
Returns
-------
``bool``
True if node is an orphan
Examples
--------
FIXME: Add docs.
"""
pointed_to = True if any(node.id in n.backlinks for n in self.nodes) else False
points_to = node.backlinks != []
return not points_to and not pointed_to
| def __identifier_type(self, identifier: str) -> IdentifierType: | 0 | 2023-12-14 04:46:33+00:00 | 4k |
abing7k/redroid-script | stuffs/magisk.py | [
{
"identifier": "General",
"path": "stuffs/general.py",
"snippet": "class General:\n def download(self):\n loc_md5 = \"\"\n if os.path.isfile(self.dl_file_name):\n with open(self.dl_file_name,\"rb\") as f:\n bytes = f.read()\n loc_md5 = hashlib.md5(bytes).hexdigest()\n while not os.path.isfile(self.dl_file_name) or loc_md5 != self.act_md5:\n if os.path.isfile(self.dl_file_name):\n os.remove(self.dl_file_name)\n print_color(\"md5 mismatches, redownloading now ....\",bcolors.YELLOW)\n loc_md5 = download_file(self.dl_link, self.dl_file_name)\n \n def extract(self):\n print_color(\"Extracting archive...\", bcolors.GREEN)\n print(self.dl_file_name)\n print(self.extract_to)\n with zipfile.ZipFile(self.dl_file_name) as z:\n z.extractall(self.extract_to)\n def copy(self):\n pass\n def install(self):\n # pass\n self.download()\n self.extract()\n self.copy()"
},
{
"identifier": "bcolors",
"path": "tools/helper.py",
"snippet": "class bcolors:\n RED = '\\033[31m'\n YELLOW = '\\033[33m'\n GREEN = '\\033[32m'\n ENDC = '\\033[0m'"
},
{
"identifier": "download_file",
"path": "tools/helper.py",
"snippet": "def download_file(url, f_name):\n md5 = \"\"\n response = requests.get(url, stream=True)\n total_size_in_bytes = int(response.headers.get('content-length', 0))\n block_size = 1024 # 1 Kibibyte\n progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\n with open(f_name, 'wb') as file:\n for data in response.iter_content(block_size):\n progress_bar.update(len(data))\n file.write(data)\n progress_bar.close()\n with open(f_name, \"rb\") as f:\n bytes = f.read()\n md5 = hashlib.md5(bytes).hexdigest()\n if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:\n raise ValueError(\"Something went wrong while downloading\")\n return md5"
},
{
"identifier": "host",
"path": "tools/helper.py",
"snippet": "def host():\n machine = platform.machine()\n\n mapping = {\n \"i686\": (\"x86\", 32),\n \"x86_64\": (\"x86_64\", 64),\n \"aarch64\": (\"arm64-v8a\", 64),\n \"armv7l\": (\"armeabi-v7a\", 32),\n \"armv8l\": (\"armeabi-v7a\", 32)\n }\n if machine in mapping:\n # if mapping[machine] == \"x86_64\":\n # with open(\"/proc/cpuinfo\") as f:\n # if \"sse4_2\" not in f.read():\n # print(\"x86_64 CPU does not support SSE4.2, falling back to x86...\")\n # return (\"x86\", 32)\n return mapping[machine]\n raise ValueError(\"platform.machine '\" + machine + \"'\"\n \" architecture is not supported\")"
},
{
"identifier": "print_color",
"path": "tools/helper.py",
"snippet": "def print_color(str, color):\n print(color+str+bcolors.ENDC)"
},
{
"identifier": "run",
"path": "tools/helper.py",
"snippet": "def run(args):\n result = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if result.stderr:\n print(result.stderr.decode(\"utf-8\"))\n raise subprocess.CalledProcessError(\n returncode = result.returncode,\n cmd = result.args,\n stderr = result.stderr\n )\n return result"
},
{
"identifier": "get_download_dir",
"path": "tools/helper.py",
"snippet": "def get_download_dir():\n download_loc = \"\"\n if os.environ.get(\"XDG_CACHE_HOME\", None) is None:\n download_loc = os.path.join('/', \"home\", os.environ.get(\"SUDO_USER\", os.environ[\"USER\"]), \".cache\", \"redroid\", \"downloads\")\n else:\n download_loc = os.path.join(os.environ[\"XDG_CACHE_HOME\"], \"redroid\", \"downloads\")\n if not os.path.exists(download_loc):\n os.makedirs(download_loc)\n return download_loc"
}
] | import gzip
import os
import shutil
import re
from stuffs.general import General
from tools.helper import bcolors, download_file, host, print_color, run, get_download_dir | 1,880 |
class Magisk(General):
download_loc = get_download_dir()
dl_link = "https://mgb1.androidfilehost.com/dl/_E1ugpo3KLudP2K-WauRfQ/1702724403/10620683726822077179/Magisk+Delta+25206+canary+%284dbd8358%29.apk"
dl_file_name = os.path.join(download_loc, "magisk.apk")
extract_to = "/tmp/magisk_unpack"
copy_dir = "./magisk"
magisk_dir = os.path.join(copy_dir, "system", "etc", "init", "magisk")
machine = host()
oringinal_bootanim = """
service bootanim /system/bin/bootanimation
class core animation
user graphics
group graphics audio
disabled
oneshot
ioprio rt 0
task_profiles MaxPerformance
"""
bootanim_component = """
on post-fs-data
start logd
exec u:r:su:s0 root root -- /system/etc/init/magisk/magisk{arch} --auto-selinux --setup-sbin /system/etc/init/magisk
exec u:r:su:s0 root root -- /system/etc/init/magisk/magiskpolicy --live --magisk "allow * magisk_file lnk_file *"
mkdir /sbin/.magisk 700
mkdir /sbin/.magisk/mirror 700
mkdir /sbin/.magisk/block 700
copy /system/etc/init/magisk/config /sbin/.magisk/config
rm /dev/.magisk_unblock
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --post-fs-data
wait /dev/.magisk_unblock 40
rm /dev/.magisk_unblock
on zygote-start
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --service
on property:sys.boot_completed=1
mkdir /data/adb/magisk 755
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --boot-complete
exec -- /system/bin/sh -c "if [ ! -e /data/data/io.github.huskydg.magisk ] ; then pm install /system/etc/init/magisk/magisk.apk ; fi"
on property:init.svc.zygote=restarting
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
on property:init.svc.zygote=stopped
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
""".format(arch=machine[1])
def download(self):
if os.path.isfile(self.dl_file_name):
os.remove(self.dl_file_name)
print_color("Downloading latest Magisk-Delta now .....", bcolors.GREEN)
download_file(self.dl_link, self.dl_file_name)
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
if not os.path.exists(self.magisk_dir):
os.makedirs(self.magisk_dir, exist_ok=True)
if not os.path.exists(os.path.join(self.copy_dir, "sbin")):
os.makedirs(os.path.join(self.copy_dir, "sbin"), exist_ok=True)
print_color("Copying magisk libs now ...", bcolors.GREEN)
lib_dir = os.path.join(self.extract_to, "lib", self.machine[0])
for parent, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
o_path = os.path.join(lib_dir, filename)
                filename = re.search(r'lib(.*)\.so', filename)
n_path = os.path.join(self.magisk_dir, filename.group(1))
shutil.copyfile(o_path, n_path)
|
class Magisk(General):
download_loc = get_download_dir()
dl_link = "https://mgb1.androidfilehost.com/dl/_E1ugpo3KLudP2K-WauRfQ/1702724403/10620683726822077179/Magisk+Delta+25206+canary+%284dbd8358%29.apk"
dl_file_name = os.path.join(download_loc, "magisk.apk")
extract_to = "/tmp/magisk_unpack"
copy_dir = "./magisk"
magisk_dir = os.path.join(copy_dir, "system", "etc", "init", "magisk")
machine = host()
oringinal_bootanim = """
service bootanim /system/bin/bootanimation
class core animation
user graphics
group graphics audio
disabled
oneshot
ioprio rt 0
task_profiles MaxPerformance
"""
bootanim_component = """
on post-fs-data
start logd
exec u:r:su:s0 root root -- /system/etc/init/magisk/magisk{arch} --auto-selinux --setup-sbin /system/etc/init/magisk
exec u:r:su:s0 root root -- /system/etc/init/magisk/magiskpolicy --live --magisk "allow * magisk_file lnk_file *"
mkdir /sbin/.magisk 700
mkdir /sbin/.magisk/mirror 700
mkdir /sbin/.magisk/block 700
copy /system/etc/init/magisk/config /sbin/.magisk/config
rm /dev/.magisk_unblock
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --post-fs-data
wait /dev/.magisk_unblock 40
rm /dev/.magisk_unblock
on zygote-start
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --service
on property:sys.boot_completed=1
mkdir /data/adb/magisk 755
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --boot-complete
exec -- /system/bin/sh -c "if [ ! -e /data/data/io.github.huskydg.magisk ] ; then pm install /system/etc/init/magisk/magisk.apk ; fi"
on property:init.svc.zygote=restarting
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
on property:init.svc.zygote=stopped
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
""".format(arch=machine[1])
def download(self):
if os.path.isfile(self.dl_file_name):
os.remove(self.dl_file_name)
print_color("Downloading latest Magisk-Delta now .....", bcolors.GREEN)
download_file(self.dl_link, self.dl_file_name)
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
if not os.path.exists(self.magisk_dir):
os.makedirs(self.magisk_dir, exist_ok=True)
if not os.path.exists(os.path.join(self.copy_dir, "sbin")):
os.makedirs(os.path.join(self.copy_dir, "sbin"), exist_ok=True)
print_color("Copying magisk libs now ...", bcolors.GREEN)
lib_dir = os.path.join(self.extract_to, "lib", self.machine[0])
for parent, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
o_path = os.path.join(lib_dir, filename)
                filename = re.search(r'lib(.*)\.so', filename)
n_path = os.path.join(self.magisk_dir, filename.group(1))
shutil.copyfile(o_path, n_path) | run(["chmod", "+x", n_path]) | 5 | 2023-12-06 09:03:05+00:00 | 4k |
zvict/papr | models/tx.py | [
{
"identifier": "MLP",
"path": "models/mlp.py",
"snippet": "class MLP(nn.Module):\n def __init__(self, inp_dim=2, num_layers=3, num_channels=128, out_dim=2, act_type=\"leakyrelu\", last_act_type=\"none\",\n use_wn=True, a=1., b=1., trainable=False, skip_layers=[], bias=True, half_layers=[], residual_layers=[],\n residual_dims=[]):\n super(MLP, self).__init__()\n self.skip_layers = skip_layers\n self.residual_layers = residual_layers\n self.residual_dims = residual_dims\n assert len(residual_dims) == len(residual_layers)\n wn = weight_norm if use_wn else lambda x, **kwargs: x\n layers = [nn.Identity()]\n for i in range(num_layers):\n cur_inp = inp_dim if i == 0 else num_channels\n cur_out = out_dim if i == num_layers - 1 else num_channels\n if (i+1) in half_layers:\n cur_out = cur_out // 2\n if i in half_layers:\n cur_inp = cur_inp // 2\n if i in self.skip_layers:\n cur_inp += inp_dim\n if i in self.residual_layers:\n cur_inp += self.residual_dims[residual_layers.index(i)]\n layers.append(\n wn(nn.Linear(cur_inp, cur_out, bias=bias), name='weight'))\n layers.append(activation_func(act_type=act_type,\n num_channels=cur_out, a=a, b=b, trainable=trainable))\n layers[-1] = activation_func(act_type=last_act_type,\n num_channels=out_dim, a=a, b=b, trainable=trainable)\n assert len(layers) == 2 * num_layers + 1\n self.model = nn.ModuleList(layers)\n\n def forward(self, x, residuals=[]):\n skip_layers = [i*2+1 for i in self.skip_layers]\n residual_layers = [i*2+1 for i in self.residual_layers]\n assert len(residuals) == len(self.residual_layers)\n # print(skip_layers)\n inp = x\n for i, layer in enumerate(self.model):\n if i in skip_layers:\n x = torch.cat([x, inp], dim=-1)\n if i in residual_layers:\n x = torch.cat([x, residuals[residual_layers.index(i)]], dim=-1)\n x = layer(x)\n return x"
},
{
"identifier": "PoseEnc",
"path": "models/utils.py",
"snippet": "class PoseEnc(nn.Module):\n def __init__(self, factor=2.0, mult_factor=1.0):\n super(PoseEnc, self).__init__()\n self.factor = factor\n self.mult_factor = mult_factor\n\n def forward(self, x, L_embed, without_self=False):\n return posenc(x, L_embed, self.factor, without_self, self.mult_factor)"
},
{
"identifier": "activation_func",
"path": "models/utils.py",
"snippet": "def activation_func(act_type='leakyrelu', neg_slope=0.2, inplace=True, num_channels=128, a=1., b=1., trainable=False):\n act_type = act_type.lower()\n if act_type == 'none':\n layer = nn.Identity()\n elif act_type == 'leakyrelu':\n layer = nn.LeakyReLU(neg_slope, inplace)\n elif act_type == 'prelu':\n layer = nn.PReLU(num_channels)\n elif act_type == 'relu':\n layer = nn.ReLU(inplace)\n elif act_type == '+1':\n layer = PlusOneActivation()\n elif act_type == 'relu+1':\n layer = nn.Sequential(nn.ReLU(inplace), PlusOneActivation())\n elif act_type == 'tanh':\n layer = nn.Tanh()\n elif act_type == 'shifted_tanh':\n layer = ShiftedTanh()\n elif act_type == 'sigmoid':\n layer = nn.Sigmoid()\n elif act_type == 'gelu':\n layer = nn.GELU()\n elif act_type == 'gaussian':\n layer = GaussianActivation(a, trainable)\n elif act_type == 'quadratic':\n layer = QuadraticActivation(a, trainable)\n elif act_type == 'multi-quadratic':\n layer = MultiQuadraticActivation(a, trainable)\n elif act_type == 'laplacian':\n layer = LaplacianActivation(a, trainable)\n elif act_type == 'super-gaussian':\n layer = SuperGaussianActivation(a, b, trainable)\n elif act_type == 'expsin':\n layer = ExpSinActivation(a, trainable)\n elif act_type == 'clamp':\n layer = Clamp(0, 1)\n elif 'sine' in act_type:\n layer = Sine(factor=a)\n elif 'softplus' in act_type:\n a, b, c = [float(i) for i in act_type.split('_')[1:]]\n print(\n 'Softplus activation: a={:.2f}, b={:.2f}, c={:.2f}'.format(a, b, c))\n layer = SoftplusActivation(a, b, c)\n else:\n raise NotImplementedError(\n 'activation layer [{:s}] is not found'.format(act_type))\n return layer"
}
] | import torch
import torch.nn as nn
import math
from torch import autocast
from .mlp import MLP
from .utils import PoseEnc, activation_func | 2,721 |
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class InstanceNorm(nn.Module):
"Construct a InstanceNorm module"
def __init__(self, eps=1e-6):
super(InstanceNorm, self).__init__()
self.eps = eps
def forward(self, x):
mean = x.mean(0, keepdim=True)
std = x.std(0, keepdim=True)
return (x - mean) / (std + self.eps)
def attention(query, key, kernel_type):
"""
Compute Attention Scores
query: [batch_size, n_heads, query_len, d_kq] or [batch_size, query_len, d_kq]
key: [batch_size, n_heads, seq_len, d_kq] or [batch_size, seq_len, d_kq]
"""
d_kq = query.size(-1)
if kernel_type == "scaled-dot":
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_kq)
elif kernel_type == "-scaled-dot":
scores = -torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_kq)
elif kernel_type == "dot":
scores = torch.matmul(query, key.transpose(-2, -1))
elif kernel_type == "-dot":
scores = -torch.matmul(query, key.transpose(-2, -1))
elif kernel_type == "l1-dist":
scores = torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=1, dim=-1)
elif kernel_type == "-l1-dist":
scores = -torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=1, dim=-1)
elif kernel_type == "l2-dist":
scores = torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1)
elif kernel_type == "-l2-dist":
scores = -torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1)
elif kernel_type == "scaled-l2-dist":
scores = torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1) / math.sqrt(d_kq)
elif kernel_type == "-scaled-l2-dist":
scores = -torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1) / math.sqrt(d_kq)
elif kernel_type == "cosine":
scores = torch.matmul(query, key.transpose(-2, -1)) / (
torch.norm(query, dim=-1, keepdim=True)
* torch.norm(key, dim=-1, keepdim=True).transpose(-2, -1)
)
else:
raise ValueError("Unknown kernel type: {}".format(kernel_type))
return scores
class FeedForward(nn.Module):
"Implements FFN module."
def __init__(self, d_input, d_output, d_ff, n_layer=2, act="relu", last_act="none", dropout=0.1, norm="layernorm",
residual=True, act_a=1.0, act_b=1.0, act_trainable=False, use_wn=False, eps=1e-6, skip_layers=[],
half_layers=[]):
super(FeedForward, self).__init__()
self.eps = eps
self.d_input = d_input
self.d_output = d_output
if norm == "layernorm":
self.innorm = LayerNorm(d_input, eps)
self.outnorm = LayerNorm(d_output, eps)
elif norm == "instancenorm":
self.innorm = InstanceNorm(eps)
self.outnorm = InstanceNorm(eps)
elif norm == "none":
self.innorm = nn.Identity()
self.outnorm = nn.Identity()
else:
raise ValueError("Invalid Transformer norm type")
self.dropout = nn.Dropout(dropout)
self.mlp = MLP(d_input, n_layer, d_ff, d_output, act_type=act, last_act_type=last_act, use_wn=use_wn,
a=act_a, b=act_b, trainable=act_trainable, skip_layers=skip_layers, half_layers=half_layers)
self.residual = residual
def forward(self, x):
if self.residual and x.shape[-1] == self.d_output:
return self.outnorm(x + self.dropout(self.mlp(self.innorm(x))))
else:
return self.outnorm(self.dropout(self.mlp(self.innorm(x))))
class Embeddings(nn.Module):
def __init__(self, d_k, d_q, d_v, d_model, seq_len, args, d_ko=0, d_qo=0, d_vo=0, eps=1e-6):
super(Embeddings, self).__init__()
self.d_k = d_k
self.d_q = d_q
self.d_v = d_v
self.seq_len = seq_len
self.args = args
self.embed_type = args.embed_type
self.d_model = d_model
self.share_embed = args.share_embed
self.d_ko = d_ko
self.d_qo = d_qo
self.d_vo = d_vo
self.eps = eps
|
def get_transformer(args, seq_len, v_extra_dim=0, k_extra_dim=0, q_extra_dim=0, eps=1e-6, use_amp=False, amp_dtype=torch.float16):
k_dim_map = {
1: [3, 3, 3],
}
k_dim = k_dim_map[args.k_type]
q_dim_map = {
1: [3],
}
q_dim = q_dim_map[args.q_type]
v_dim_map = {
1: [3, 3],
}
v_dim = v_dim_map[args.v_type]
return Transformer(d_k=k_dim, d_q=q_dim, d_v=v_dim, d_model=args.d_model, d_out=args.d_out, seq_len=seq_len,
embed_args=args.embed, block_args=args.block, d_ko=k_extra_dim, d_qo=q_extra_dim,
d_vo=v_extra_dim, eps=eps, use_amp=use_amp, amp_dtype=amp_dtype)
class LayerNorm(nn.Module):
"Construct a layernorm module"
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class InstanceNorm(nn.Module):
"Construct a InstanceNorm module"
def __init__(self, eps=1e-6):
super(InstanceNorm, self).__init__()
self.eps = eps
def forward(self, x):
mean = x.mean(0, keepdim=True)
std = x.std(0, keepdim=True)
return (x - mean) / (std + self.eps)
def attention(query, key, kernel_type):
"""
Compute Attention Scores
query: [batch_size, n_heads, query_len, d_kq] or [batch_size, query_len, d_kq]
key: [batch_size, n_heads, seq_len, d_kq] or [batch_size, seq_len, d_kq]
"""
d_kq = query.size(-1)
if kernel_type == "scaled-dot":
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_kq)
elif kernel_type == "-scaled-dot":
scores = -torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_kq)
elif kernel_type == "dot":
scores = torch.matmul(query, key.transpose(-2, -1))
elif kernel_type == "-dot":
scores = -torch.matmul(query, key.transpose(-2, -1))
elif kernel_type == "l1-dist":
scores = torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=1, dim=-1)
elif kernel_type == "-l1-dist":
scores = -torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=1, dim=-1)
elif kernel_type == "l2-dist":
scores = torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1)
elif kernel_type == "-l2-dist":
scores = -torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1)
elif kernel_type == "scaled-l2-dist":
scores = torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1) / math.sqrt(d_kq)
elif kernel_type == "-scaled-l2-dist":
scores = -torch.norm(query.unsqueeze(-2) -
key.unsqueeze(-3), p=2, dim=-1) / math.sqrt(d_kq)
elif kernel_type == "cosine":
scores = torch.matmul(query, key.transpose(-2, -1)) / (
torch.norm(query, dim=-1, keepdim=True)
* torch.norm(key, dim=-1, keepdim=True).transpose(-2, -1)
)
else:
raise ValueError("Unknown kernel type: {}".format(kernel_type))
return scores
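# Editor's note (illustrative, not from the original file): a quick shape sanity check
# for the kernels above, assuming 4-D multi-head inputs. Every kernel reduces the last
# dimension, so the scores come out as [batch, n_heads, query_len, seq_len], e.g.:
#   q = torch.randn(2, 8, 5, 64)   # [batch, n_heads, query_len, d_kq]
#   k = torch.randn(2, 8, 7, 64)   # [batch, n_heads, seq_len, d_kq]
#   attention(q, k, "scaled-dot").shape   # torch.Size([2, 8, 5, 7])
#   attention(q, k, "-l2-dist").shape     # torch.Size([2, 8, 5, 7])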
class FeedForward(nn.Module):
"Implements FFN module."
def __init__(self, d_input, d_output, d_ff, n_layer=2, act="relu", last_act="none", dropout=0.1, norm="layernorm",
residual=True, act_a=1.0, act_b=1.0, act_trainable=False, use_wn=False, eps=1e-6, skip_layers=[],
half_layers=[]):
super(FeedForward, self).__init__()
self.eps = eps
self.d_input = d_input
self.d_output = d_output
if norm == "layernorm":
self.innorm = LayerNorm(d_input, eps)
self.outnorm = LayerNorm(d_output, eps)
elif norm == "instancenorm":
self.innorm = InstanceNorm(eps)
self.outnorm = InstanceNorm(eps)
elif norm == "none":
self.innorm = nn.Identity()
self.outnorm = nn.Identity()
else:
raise ValueError("Invalid Transformer norm type")
self.dropout = nn.Dropout(dropout)
self.mlp = MLP(d_input, n_layer, d_ff, d_output, act_type=act, last_act_type=last_act, use_wn=use_wn,
a=act_a, b=act_b, trainable=act_trainable, skip_layers=skip_layers, half_layers=half_layers)
self.residual = residual
def forward(self, x):
if self.residual and x.shape[-1] == self.d_output:
return self.outnorm(x + self.dropout(self.mlp(self.innorm(x))))
else:
return self.outnorm(self.dropout(self.mlp(self.innorm(x))))
class Embeddings(nn.Module):
def __init__(self, d_k, d_q, d_v, d_model, seq_len, args, d_ko=0, d_qo=0, d_vo=0, eps=1e-6):
super(Embeddings, self).__init__()
self.d_k = d_k
self.d_q = d_q
self.d_v = d_v
self.seq_len = seq_len
self.args = args
self.embed_type = args.embed_type
self.d_model = d_model
self.share_embed = args.share_embed
self.d_ko = d_ko
self.d_qo = d_qo
self.d_vo = d_vo
self.eps = eps
| self.posenc = PoseEnc(args.pe_factor, args.pe_mult_factor) | 1 | 2023-12-08 19:51:42+00:00 | 4k |
Saibo-creator/transformers-CFG | transformers_cfg/grammar_utils.py | [
{
"identifier": "LEAF",
"path": "transformers_cfg/vocab_struct.py",
"snippet": "LEAF = -1"
},
{
"identifier": "TokenTrie",
"path": "transformers_cfg/vocab_struct.py",
"snippet": "class TokenTrie:\n def __init__(self, tokenizer):\n self.eos_token_id = tokenizer.eos_token_id\n self.tokens = []\n self.trie = {}\n self.load_tokens(tokenizer)\n\n def id2str(self, token_id):\n return self.tokens[token_id]\n\n def __len__(self):\n return len(self.tokens)\n\n def load_tokens(self, tokenizer):\n def replace_hex(match):\n hex_value = match.group(1)\n return chr(int(hex_value, 16))\n\n if \"gpt2\" in tokenizer.__class__.__name__.lower():\n special = tokenizer.additional_special_tokens_ids\n\n # Here, the decoder does a string replace on a bunch of sequences\n # like ' .' for '.'. This interferes with our assumptions, where a\n # token should always have exactly one representation.\n # Fortunately(?) text-generation-inference doesn't seem to run this\n # cleanup, so we get extraneous spaces. So, in order to generate\n # the right token set for TGI, we have to skip the space trimming.\n # See:\n # https://github.com/huggingface/transformers/blob/main/src/transformers/tokenization_utils_base.py#L3588-L3600\n def fmt_token(id):\n if id in special:\n return None\n return bytes(\n tokenizer.decode([id], clean_up_tokenization_spaces=False), \"utf-8\"\n )\n\n elif (\n \"llama\" in tokenizer.__class__.__name__.lower()\n or \"t5\" in tokenizer.__class__.__name__.lower()\n ):\n\n def fmt_token(id):\n token = tokenizer.convert_ids_to_tokens(id)\n token = re.sub(r\"<0x([0-9a-fA-F]{2})>\", replace_hex, token)\n token = token.replace(\"▁\", \" \")\n return bytes(token, \"utf-8\")\n\n else:\n print(\"Warning: unrecognized tokenizer: using default token formatting\")\n\n def fmt_token(id):\n token = tokenizer.convert_ids_to_tokens(id)\n return bytes(token, \"utf-8\")\n\n # note: vocab_size doesn't work here because there are also\n # get_added_vocab() tokens\n self.tokens = [fmt_token(i) for i in range(len(tokenizer.get_vocab()))]\n for token_id, token_bytes in enumerate(self.tokens):\n if token_bytes is not None:\n self.insert_into_trie(self.trie, token_bytes, token_id)\n\n def insert_into_trie(self, trie, token_bytes, token_id):\n current = trie\n for byte in token_bytes:\n if byte not in current:\n current[byte] = {}\n current = current[byte]\n current[LEAF] = token_id"
}
] | import logging
import sys
import time
import torch
from abc import ABC
from functools import lru_cache
from typing import Dict, List
from .vocab_struct import LEAF, TokenTrie | 3,365 | return remaining_src
def parse_rule(state, src):
name, remaining_src = parse_name(src)
remaining_src = remove_leading_white_space(remaining_src, False)
rule_id = get_symbol_id(state, name)
if remaining_src[:3] != "::=":
raise RuntimeError("expecting ::= at " + remaining_src)
remaining_src = remove_leading_white_space(remaining_src[3:], True)
remaining_src = parse_alternates(state, remaining_src, name, rule_id, False)
if remaining_src and remaining_src[0] == "\r":
remaining_src = (
remaining_src[2:] if remaining_src[1] == "\n" else remaining_src[1:]
)
elif remaining_src and remaining_src[0] == "\n":
remaining_src = remaining_src[1:]
elif remaining_src:
raise RuntimeError("expecting newline or end at " + remaining_src)
return remove_leading_white_space(remaining_src, True)
def parse_ebnf(src):
try:
state = ParseState()
grammar_repr = remove_leading_white_space(src, True)
last_grammar_repr = ""
while grammar_repr:
if last_grammar_repr:
last_parsed_rule_len = len(last_grammar_repr) - len(grammar_repr)
logger.debug(
f"last_parsed_rule: {last_grammar_repr[:last_parsed_rule_len]}"
)
last_grammar_repr = grammar_repr
grammar_repr = parse_rule(state, grammar_repr)
state.grammar_encoding.append(0xFFFF)
return state
except RuntimeError as err:
logger.warning("error parsing grammar:", err)
return ParseState()
def print_rule(file, grammar_encoding, index, symbol_id_names):
rule_id = grammar_encoding[index]
print(f"<{index}>{symbol_id_names[rule_id]} ::=", end=" ", file=file)
pos = index + 1
while grammar_encoding[pos]:
if pos - 1 > index:
print("|", end=" ", file=file)
pos += 1 # sequence size, not needed here
while grammar_encoding[pos]:
if grammar_encoding[pos] == REF_RULE_MARKER:
ref_rule_id = grammar_encoding[pos + 1]
print(
f"<{pos}>{symbol_id_names[ref_rule_id]}",
end=" ",
file=file,
)
pos += 2
else:
print("<{}>[".format(pos), end="", file=file)
num_chars = grammar_encoding[pos]
pos += 1
for i in range(0, num_chars, 2):
print(
"{}-".format(chr(grammar_encoding[pos + i])), end="", file=file
)
if i + 1 < num_chars:
print(
"{}".format(chr(grammar_encoding[pos + i + 1])),
end="",
file=file,
)
print("]", end=" ", file=file)
pos += num_chars
pos += 1
print(file=file)
return pos + 1
def print_grammar(file, state):
pos = 0
symbol_id_names = {v: k for k, v in state.symbol_ids.items()}
print("Grammar Rules:", file=file)
while state.grammar_encoding[pos] != 0xFFFF:
pos = print_rule(file, state.grammar_encoding, pos, symbol_id_names)
pos = 0
print("\nBinary representation:", file=file)
while state.grammar_encoding[pos] != 0xFFFF:
print(f"{state.grammar_encoding[pos]:04x}", end=" ", file=file)
pos += 1
print("ffff\n")
offset = 0
print("Grammar Rule Sizes:", file=file)
for i, rule_size in enumerate(state.grammar_encoding_rule_size):
print(
f"<{i}> {rule_size} {state.grammar_encoding[offset:offset+rule_size]}",
file=file,
)
offset += rule_size
###################################
# EBNF Grammar Parsing ends here #
###################################
class AbstractGrammarConstraint(ABC):
def __init__(self, grammar_str, start_rule_name, tokenizer):
state = parse_ebnf(grammar_str)
grammar_encoding = state.grammar_encoding
self.start_rule_id = state.symbol_ids.get(start_rule_name)
self.eos_token_id = tokenizer.eos_token_id
|
logger = logging.getLogger(__name__)
########################
# EBNF Grammar Parsing #
########################
END_OF_ALTERNATE_MARKER = 0
END_OF_RULE_MARKER = 0
TO_BE_FILLED_MARKER = 0
REF_RULE_MARKER = 1
LITERAL_MARKER = 2
class ParseState:
def __init__(self):
self.symbol_ids = {}
self.grammar_encoding = [] # old name: out_grammar
self.grammar_encoding_rule_size = []
def get_symbol_id(state, src):
if src not in state.symbol_ids:
state.symbol_ids[src] = len(state.symbol_ids)
return state.symbol_ids[src]
def generate_symbol_id(state, base_name):
next_id = len(state.symbol_ids)
state.symbol_ids[base_name + "_" + str(next_id)] = next_id
return next_id
def is_word_char(c):
return c.isalnum() or c == "-" or c == "_"
def hex_to_int(c):
if c.isdigit():
return int(c)
elif "a" <= c.lower() <= "f":
return ord(c.lower()) - ord("a") + 10
return -1
def remove_leading_white_space(src, newline_ok):
"""
Skips over whitespace and comments in the input string.
This function processes the input string, skipping over any spaces, tabs,
and content following a '#' character, which denotes a comment. The parsing
of a comment continues until the end of the line (denoted by newline characters
'\r' or '\n'). If the 'newline_ok' parameter is set to False, the function
will stop processing and return the remaining string upon encountering a
newline character, otherwise it will skip over newline characters as well.
Parameters:
src (str): The input string to be processed.
newline_ok (bool): A flag indicating whether encountering a newline character
should stop the parsing (False) or if it should be skipped (True).
Returns:
str: The remaining portion of the input string after skipping whitespace and comments.
"""
pos = 0
while pos < len(src) and (src[pos].isspace() or src[pos] == "#"):
if src[pos] == "#":
while pos < len(src) and src[pos] not in ("\r", "\n"):
pos += 1
else:
if not newline_ok and src[pos] in ("\r", "\n"):
break
pos += 1
return src[pos:]
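# Editor's note (illustrative, not from the original file): example behaviour of the
# helper above on a typical EBNF fragment.
#   remove_leading_white_space("  # comment\nroot ::= x", True)   # -> 'root ::= x'
#   remove_leading_white_space("  # comment\nroot ::= x", False)  # -> '\nroot ::= x'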
def parse_name(src):
pos = 0
while pos < len(src) and is_word_char(src[pos]):
pos += 1
if pos == 0:
raise RuntimeError("expecting name at " + src)
return src[:pos], src[pos:]
def parse_char(src):
"""
parse the leading char from the input string
:param src:
:return: char, remaining_src
"""
# if we have a backslash, it's maybe an escape
if src[0] == "\\":
esc = src[1]
if esc == "x":
first = hex_to_int(src[2])
if first > -1:
second = hex_to_int(src[3])
if second > -1:
return (first << 4) + second, src[4:]
raise RuntimeError("expecting \\xNN at " + src)
elif esc in ('"', "[", "]"):
return esc, src[2:]
elif esc == "r":
return "\r", src[2:]
elif esc == "n":
return "\n", src[2:]
elif esc == "t":
return "\t", src[2:]
raise RuntimeError("unknown escape at " + src)
elif src:
return src[0], src[1:]
raise RuntimeError("unexpected end of input")
def parse_sequence(state, src, rule_name, outbuf, is_nested):
out_start_pos = len(outbuf)
# sequence size, will be replaced at end when known
outbuf.append(TO_BE_FILLED_MARKER)
last_sym_start = len(outbuf)
remaining_src = src
while remaining_src:
if remaining_src[0] == '"': # literal string
remaining_src = remaining_src[1:]
last_sym_start = len(outbuf)
while remaining_src[0] != '"':
char, remaining_src = parse_char(remaining_src)
# each char of a literal is encoded as a "range" of char - char
outbuf.append(LITERAL_MARKER)
outbuf.append(ord(char))
outbuf.append(ord(char))
remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
elif remaining_src[0] == "[": # char range(s)
remaining_src = remaining_src[1:]
last_sym_start = len(outbuf)
# num chars in range - replaced at end of loop
outbuf.append(TO_BE_FILLED_MARKER)
while remaining_src[0] != "]":
char, remaining_src = parse_char(remaining_src)
outbuf.append(ord(char))
if remaining_src[0] == "-" and remaining_src[1] != "]":
endchar_pair, remaining_src = parse_char(remaining_src[1:])
outbuf.append(ord(endchar_pair))
else:
# chars that aren't part of a c1-c2 range are just doubled (i.e., c-c)
outbuf.append(ord(char))
# replace num chars with actual
outbuf[last_sym_start] = len(outbuf) - last_sym_start - 1
remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
elif is_word_char(remaining_src[0]): # rule reference
name, remaining_src = parse_name(remaining_src)
ref_rule_id = get_symbol_id(state, name)
remaining_src = remove_leading_white_space(remaining_src, is_nested)
last_sym_start = len(outbuf)
outbuf.append(REF_RULE_MARKER)
outbuf.append(ref_rule_id)
elif remaining_src[0] == "(": # grouping
# parse nested alternates into synthesized rule
remaining_src = remove_leading_white_space(remaining_src[1:], True)
sub_rule_id = generate_symbol_id(state, rule_name)
remaining_src = parse_alternates(
state, remaining_src, rule_name, sub_rule_id, True
)
last_sym_start = len(outbuf)
# output reference to synthesized rule
outbuf.append(REF_RULE_MARKER)
outbuf.append(sub_rule_id)
if remaining_src[0] != ")":
raise RuntimeError("expecting ')' at " + remaining_src)
remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
elif remaining_src[0] in ("*", "+", "?"): # repetition operator
if len(outbuf) - out_start_pos - 1 == 0:
raise RuntimeError(
"expecting preceeding item to */+/? at " + remaining_src
)
out_grammar = state.grammar_encoding
# apply transformation to previous symbol (last_sym_start -
# end) according to rewrite rules:
# S* --> S' ::= S S' |
# S+ --> S' ::= S S' | S
# S? --> S' ::= S |
sub_rule_id = generate_symbol_id(state, rule_name)
out_grammar.append(sub_rule_id)
sub_rule_start = len(out_grammar)
# placeholder for size of 1st alternate
out_grammar.append(TO_BE_FILLED_MARKER)
# add preceding symbol to generated rule
out_grammar.extend(outbuf[last_sym_start:])
if remaining_src[0] in ("*", "+"):
# cause generated rule to recurse
out_grammar.append(REF_RULE_MARKER)
out_grammar.append(sub_rule_id)
# apply actual size
out_grammar[sub_rule_start] = len(out_grammar) - sub_rule_start
# mark end of 1st alternate
out_grammar.append(END_OF_ALTERNATE_MARKER)
sub_rule_start = len(out_grammar)
# placeholder for size of 2nd alternate
out_grammar.append(TO_BE_FILLED_MARKER)
if remaining_src[0] == "+":
# add preceding symbol as alternate only for '+'
out_grammar.extend(outbuf[last_sym_start:])
# apply actual size of 2nd alternate
out_grammar[sub_rule_start] = len(out_grammar) - sub_rule_start
# mark end of 2nd alternate, then end of rule
out_grammar.append(END_OF_ALTERNATE_MARKER)
out_grammar.append(END_OF_RULE_MARKER)
# in original rule, replace previous symbol with reference to generated rule
outbuf[last_sym_start:] = [1, sub_rule_id]
remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
else:
break
# apply actual size of this alternate sequence
outbuf[out_start_pos] = len(outbuf) - out_start_pos
# mark end of alternate
outbuf.append(END_OF_ALTERNATE_MARKER)
return remaining_src
def parse_alternates(state, src, rule_name, rule_id, is_nested):
outbuf = []
remaining_src = parse_sequence(state, src, rule_name, outbuf, is_nested)
while remaining_src and remaining_src[0] == "|":
remaining_src = remove_leading_white_space(remaining_src[1:], True)
remaining_src = parse_sequence(
state, remaining_src, rule_name, outbuf, is_nested
)
state.grammar_encoding.append(rule_id)
state.grammar_encoding.extend(outbuf)
state.grammar_encoding.append(0)
state.grammar_encoding_rule_size.append(len(outbuf) + 2)
return remaining_src
def parse_rule(state, src):
name, remaining_src = parse_name(src)
remaining_src = remove_leading_white_space(remaining_src, False)
rule_id = get_symbol_id(state, name)
if remaining_src[:3] != "::=":
raise RuntimeError("expecting ::= at " + remaining_src)
remaining_src = remove_leading_white_space(remaining_src[3:], True)
remaining_src = parse_alternates(state, remaining_src, name, rule_id, False)
if remaining_src and remaining_src[0] == "\r":
remaining_src = (
remaining_src[2:] if remaining_src[1] == "\n" else remaining_src[1:]
)
elif remaining_src and remaining_src[0] == "\n":
remaining_src = remaining_src[1:]
elif remaining_src:
raise RuntimeError("expecting newline or end at " + remaining_src)
return remove_leading_white_space(remaining_src, True)
def parse_ebnf(src):
try:
state = ParseState()
grammar_repr = remove_leading_white_space(src, True)
last_grammar_repr = ""
while grammar_repr:
if last_grammar_repr:
last_parsed_rule_len = len(last_grammar_repr) - len(grammar_repr)
logger.debug(
f"last_parsed_rule: {last_grammar_repr[:last_parsed_rule_len]}"
)
last_grammar_repr = grammar_repr
grammar_repr = parse_rule(state, grammar_repr)
state.grammar_encoding.append(0xFFFF)
return state
except RuntimeError as err:
logger.warning("error parsing grammar:", err)
return ParseState()
def print_rule(file, grammar_encoding, index, symbol_id_names):
rule_id = grammar_encoding[index]
print(f"<{index}>{symbol_id_names[rule_id]} ::=", end=" ", file=file)
pos = index + 1
while grammar_encoding[pos]:
if pos - 1 > index:
print("|", end=" ", file=file)
pos += 1 # sequence size, not needed here
while grammar_encoding[pos]:
if grammar_encoding[pos] == REF_RULE_MARKER:
ref_rule_id = grammar_encoding[pos + 1]
print(
f"<{pos}>{symbol_id_names[ref_rule_id]}",
end=" ",
file=file,
)
pos += 2
else:
print("<{}>[".format(pos), end="", file=file)
num_chars = grammar_encoding[pos]
pos += 1
for i in range(0, num_chars, 2):
print(
"{}-".format(chr(grammar_encoding[pos + i])), end="", file=file
)
if i + 1 < num_chars:
print(
"{}".format(chr(grammar_encoding[pos + i + 1])),
end="",
file=file,
)
print("]", end=" ", file=file)
pos += num_chars
pos += 1
print(file=file)
return pos + 1
def print_grammar(file, state):
pos = 0
symbol_id_names = {v: k for k, v in state.symbol_ids.items()}
print("Grammar Rules:", file=file)
while state.grammar_encoding[pos] != 0xFFFF:
pos = print_rule(file, state.grammar_encoding, pos, symbol_id_names)
pos = 0
print("\nBinary representation:", file=file)
while state.grammar_encoding[pos] != 0xFFFF:
print(f"{state.grammar_encoding[pos]:04x}", end=" ", file=file)
pos += 1
print("ffff\n")
offset = 0
print("Grammar Rule Sizes:", file=file)
for i, rule_size in enumerate(state.grammar_encoding_rule_size):
print(
f"<{i}> {rule_size} {state.grammar_encoding[offset:offset+rule_size]}",
file=file,
)
offset += rule_size
###################################
# EBNF Grammar Parsing ends here #
###################################
class AbstractGrammarConstraint(ABC):
def __init__(self, grammar_str, start_rule_name, tokenizer):
state = parse_ebnf(grammar_str)
grammar_encoding = state.grammar_encoding
self.start_rule_id = state.symbol_ids.get(start_rule_name)
self.eos_token_id = tokenizer.eos_token_id | self.token_trie = TokenTrie(tokenizer) | 1 | 2023-12-07 13:32:54+00:00 | 4k |
rinnakk/nue-asr | nue_asr/cli.py | [
{
"identifier": "transcribe",
"path": "nue_asr/transcribe.py",
"snippet": "@torch.inference_mode()\ndef transcribe(\n model: NueASRModel,\n tokenizer: PreTrainedTokenizer,\n audio: Union[str, np.ndarray, torch.Tensor],\n **decode_options,\n) -> ASRResult:\n device = model.device\n sr = 16000\n\n decode_options.setdefault(\"do_sample\", False)\n decode_options.setdefault(\"num_beams\", 1)\n decode_options.setdefault(\"temperature\", 1.0)\n decode_options.setdefault(\"top_p\", 1.0)\n decode_options.setdefault(\"min_new_tokens\", 2)\n decode_options.setdefault(\"max_new_tokens\", None)\n\n if isinstance(audio, str):\n from librosa import load\n\n audio = load(audio, sr=sr)[0]\n\n if not torch.is_tensor(audio):\n audio = torch.from_numpy(audio)\n\n if audio.dim() != 1:\n assert audio.dim() == 2 and audio.shape[0] == 1, \"Only mono audio is supported.\"\n\n audio = audio.to(model.dtype).reshape(1, -1)\n audio_len_sec = audio.shape[-1] / sr\n if decode_options[\"max_new_tokens\"] is None:\n decode_options[\"max_new_tokens\"] = int(4 * audio_len_sec + 20 + 0.5)\n\n if audio_len_sec > WARN_TOO_LONG_THRESHOLD:\n logger.warning(\n f\"The input audio is {audio_len_sec:.1f} sec, \"\n \"but such long audio inputs may degrade recognition accuracy. \"\n \"It is recommended to split the audio into shorter segments.\"\n )\n\n prefix_token = tokenizer.encode(\n \"<s>\",\n add_special_tokens=False,\n return_tensors=\"pt\",\n )\n postfix_token = tokenizer.encode(\n \"[SEP]\",\n add_special_tokens=False,\n return_tensors=\"pt\",\n )\n outputs = model(\n prefix_token.to(device),\n audio.to(device),\n postfix_token.to(device),\n pad_token_id=tokenizer.pad_token_id,\n bos_token_id=tokenizer.bos_token_id,\n eos_token_id=tokenizer.eos_token_id,\n **decode_options,\n )\n output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n return ASRResult(text=output_text)"
},
{
"identifier": "load_model",
"path": "nue_asr/utils.py",
"snippet": "def load_model(\n model_name_or_path: Optional[str] = None,\n device: Optional[Union[str, torch.device]] = \"cuda\",\n fp16: bool = True,\n use_deepspeed: bool = False,\n) -> NueASRModel:\n if model_name_or_path is None:\n model_name_or_path = DEFAULT_MODEL_NAME\n\n device = torch.device(device)\n if device.type == \"cpu\":\n if torch.cuda.is_available():\n logging.warning(\n \"CUDA is available but using CPU. \"\n \"If you want to use CUDA, set `device` to `cuda`.\"\n )\n if fp16:\n logging.warning(\"FP16 is not supported on CPU. Using FP32 instead.\")\n fp16 = False\n if use_deepspeed:\n logging.warning(\"DeepSpeed is not supported on CPU. Disabling it.\")\n use_deepspeed = False\n\n dtype = torch.float16 if fp16 else torch.float32\n\n model = NueASRModel.from_pretrained(model_name_or_path)\n model.to(dtype)\n\n if use_deepspeed:\n try:\n import deepspeed\n except ImportError:\n raise ImportError(\n \"DeepSpeed is not installed. Please install it with `pip install deepspeed`.\"\n )\n\n ds_engine = deepspeed.init_inference(\n model.llm,\n replace_with_kernel_inject=True,\n dtype=dtype,\n )\n for m in ds_engine.modules():\n if (\n getattr(m, \"config\", None)\n and getattr(m.config, \"mlp_after_attn\", None) is not None\n ):\n m.config.mlp_after_attn = not model.llm.config.use_parallel_residual\n model.llm = ds_engine.module\n\n if device is not None:\n model.to(device)\n\n logger.info(f\"Finished loading model from {model_name_or_path}\")\n\n return model"
},
{
"identifier": "load_tokenizer",
"path": "nue_asr/utils.py",
"snippet": "def load_tokenizer(model_name_or_path: Optional[str] = None):\n if model_name_or_path is None:\n model_name_or_path = DEFAULT_MODEL_NAME\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_name_or_path, use_fast=False, legacy=True\n )\n return tokenizer"
},
{
"identifier": "set_seed",
"path": "nue_asr/utils.py",
"snippet": "def set_seed(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)"
},
{
"identifier": "str2bool",
"path": "nue_asr/utils.py",
"snippet": "def str2bool(v: str):\n if v.lower() in (\"true\", \"t\", \"yes\", \"y\", \"1\"):\n return True\n if v.lower() in (\"false\", \"f\", \"no\", \"n\", \"0\"):\n return False\n raise ValueError(f\"Invalid boolean value: {v}\")"
}
] | import argparse
import os
import torch
from .transcribe import transcribe
from .utils import load_model, load_tokenizer, set_seed, str2bool | 1,836 | #!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def cli_main():
default_device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"audio_files",
nargs="+",
type=str,
help="Audio file paths",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Model name or path",
)
parser.add_argument(
"--device",
type=str,
default=default_device,
help="Device to use for inference.",
)
parser.add_argument(
"--fp16", type=str2bool, default=True, help="Whether to fp16 inference."
)
parser.add_argument(
"--use-deepspeed",
action="store_true",
help="Whether to use DeepSpeed-Inference.",
)
group = parser.add_argument_group("Sequence generation options")
group.add_argument(
"--do-sample",
action="store_true",
help="Whether or not to use sampling; use greedy decoding otherwise.",
)
group.add_argument(
"--num-beams",
type=int,
default=1,
help="Number of beams for beam search. 1 means no beam search.",
)
group.add_argument(
"--temperature",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--top-p",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--min-new-tokens",
type=int,
default=2,
help="The minimum length of the sequence to be generated.",
)
group.add_argument(
"--max-new-tokens",
type=int,
default=None,
help="The maximum numbers of tokens to generate.",
)
args = parser.parse_args()
set_seed(1234)
| #!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def cli_main():
default_device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"audio_files",
nargs="+",
type=str,
help="Audio file paths",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Model name or path",
)
parser.add_argument(
"--device",
type=str,
default=default_device,
help="Device to use for inference.",
)
parser.add_argument(
"--fp16", type=str2bool, default=True, help="Whether to fp16 inference."
)
parser.add_argument(
"--use-deepspeed",
action="store_true",
help="Whether to use DeepSpeed-Inference.",
)
group = parser.add_argument_group("Sequence generation options")
group.add_argument(
"--do-sample",
action="store_true",
help="Whether or not to use sampling; use greedy decoding otherwise.",
)
group.add_argument(
"--num-beams",
type=int,
default=1,
help="Number of beams for beam search. 1 means no beam search.",
)
group.add_argument(
"--temperature",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--top-p",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--min-new-tokens",
type=int,
default=2,
help="The minimum length of the sequence to be generated.",
)
group.add_argument(
"--max-new-tokens",
type=int,
default=None,
help="The maximum numbers of tokens to generate.",
)
args = parser.parse_args()
set_seed(1234) | model = load_model( | 1 | 2023-12-07 01:37:23+00:00 | 4k |
AdaCheng/EgoThink | models/instruct_blip/models/clip_vit.py | [
{
"identifier": "convert_weights_to_fp16",
"path": "models/instruct_blip/models/eva_vit.py",
"snippet": "def convert_weights_to_fp16(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()\n\n# if isinstance(l, (nn.MultiheadAttention, Attention)):\n# for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n# tensor = getattr(l, attr)\n# if tensor is not None:\n# tensor.data = tensor.data.half()\n\n model.apply(_convert_weights_to_fp16)"
},
{
"identifier": "download_cached_file",
"path": "models/instruct_blip/common/dist_utils.py",
"snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()"
}
] | from collections import OrderedDict
from itertools import repeat
from torch import nn
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from .eva_vit import convert_weights_to_fp16
from ..common.dist_utils import download_cached_file
import collections.abc
import math
import torch
import torch.nn.functional as F | 2,836 | ("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
if use_grad_checkpointing:
self.attn = checkpoint_wrapper(self.attn)
self.mlp = checkpoint_wrapper(self.mlp)
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask, use_grad_checkpointing and i>12) for i in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, use_grad_checkpointing: bool):
super().__init__()
self.input_resolution = input_resolution
self.num_features = width
self.num_heads = heads
self.num_patches = (input_resolution // patch_size) ** 2
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(self.num_patches + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, use_grad_checkpointing=use_grad_checkpointing)
# self.ln_final = LayerNorm(width)
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
# x = self.ln_final(x)
return x
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
def interpolate_pos_embed(model, state_dict, interpolation: str = 'bicubic', seq_dim=1):
# Rescale the grid of position embeddings when loading from state_dict
old_pos_embed = state_dict.get('positional_embedding', None)
grid_size = round((model.positional_embedding.shape[0] - 1) ** 0.5)
if old_pos_embed is None:
return
grid_size = to_2tuple(grid_size)
extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
if new_seq_len == old_pos_embed.shape[0]:
return
if extra_tokens:
pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
else:
pos_emb_tok, pos_emb_img = None, old_pos_embed
old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
    print('Resizing position embedding grid-size from %s to %s' % (old_grid_size, grid_size))
pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
pos_emb_img = F.interpolate(
pos_emb_img,
size=grid_size,
mode=interpolation,
align_corners=True,
)
pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
if pos_emb_tok is not None:
new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
else:
new_pos_embed = pos_emb_img
state_dict['positional_embedding'] = new_pos_embed
def create_clip_vit_L(img_size=224,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
input_resolution=img_size,
patch_size=14,
width=1024,
layers=23,
heads=16,
use_grad_checkpointing=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth"
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
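# Editor's note (not from the original file): QuickGELU is the sigmoid-based
# approximation x * sigmoid(1.702 * x) of GELU used by the original CLIP weights;
# it closely tracks nn.GELU() while being slightly cheaper to evaluate.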
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
if use_grad_checkpointing:
self.attn = checkpoint_wrapper(self.attn)
self.mlp = checkpoint_wrapper(self.mlp)
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask, use_grad_checkpointing and i>12) for i in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, use_grad_checkpointing: bool):
super().__init__()
self.input_resolution = input_resolution
self.num_features = width
self.num_heads = heads
self.num_patches = (input_resolution // patch_size) ** 2
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(self.num_patches + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, use_grad_checkpointing=use_grad_checkpointing)
# self.ln_final = LayerNorm(width)
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
# x = self.ln_final(x)
return x
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
def interpolate_pos_embed(model, state_dict, interpolation: str = 'bicubic', seq_dim=1):
# Rescale the grid of position embeddings when loading from state_dict
old_pos_embed = state_dict.get('positional_embedding', None)
grid_size = round((model.positional_embedding.shape[0] - 1) ** 0.5)
if old_pos_embed is None:
return
grid_size = to_2tuple(grid_size)
extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
if new_seq_len == old_pos_embed.shape[0]:
return
if extra_tokens:
pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
else:
pos_emb_tok, pos_emb_img = None, old_pos_embed
old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
    print('Resizing position embedding grid-size from %s to %s' % (old_grid_size, grid_size))
pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
pos_emb_img = F.interpolate(
pos_emb_img,
size=grid_size,
mode=interpolation,
align_corners=True,
)
pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
if pos_emb_tok is not None:
new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
else:
new_pos_embed = pos_emb_img
state_dict['positional_embedding'] = new_pos_embed
def create_clip_vit_L(img_size=224,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
input_resolution=img_size,
patch_size=14,
width=1024,
layers=23,
heads=16,
use_grad_checkpointing=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth" | cached_file = download_cached_file( | 1 | 2023-12-05 14:17:17+00:00 | 4k |
3dlg-hcvc/cage | metrics/aor.py | [
{
"identifier": "get_bbox_vertices",
"path": "objects/dict_utils.py",
"snippet": "def get_bbox_vertices(obj_dict, part_idx):\n \"\"\"\n Get the 8 vertices of the bounding box\\n\n The order of the vertices is the same as the order that pytorch3d.ops.box3d_overlap expects\\n\n (This order is not necessary since we are not using pytorch3d.ops.box3d_overlap anymore)\\n\n\n - bbox_center: the center of the bounding box in the form: [cx, cy, cz]\\n\n - bbox_size: the size of the bounding box in the form: [lx, ly, lz]\\n\n\n Return:\\n\n - bbox_vertices: the 8 vertices of the bounding box in the form: [[x0, y0, z0], [x1, y1, z1], ...]\n \"\"\"\n\n part = obj_dict[\"diffuse_tree\"][part_idx]\n bbox_center = np.array(part[\"aabb\"][\"center\"])\n bbox_size_half = np.array(part[\"aabb\"][\"size\"]) / 2\n\n bbox_vertices = np.zeros((8, 3))\n\n # Get the 8 vertices of the bounding box in the order that pytorch3d.ops.box3d_overlap expects:\n # 0: (x0, y0, z0) # 1: (x1, y0, z0) # 2: (x1, y1, z0) # 3: (x0, y1, z0)\n # 4: (x0, y0, z1) # 5: (x1, y0, z1) # 6: (x1, y1, z1) # 7: (x0, y1, z1)\n bbox_vertices[0, :] = bbox_center - bbox_size_half\n bbox_vertices[1, :] = bbox_center + np.array([bbox_size_half[0], -bbox_size_half[1], -bbox_size_half[2]])\n bbox_vertices[2, :] = bbox_center + np.array([bbox_size_half[0], bbox_size_half[1], -bbox_size_half[2]])\n bbox_vertices[3, :] = bbox_center + np.array([-bbox_size_half[0], bbox_size_half[1], -bbox_size_half[2]])\n bbox_vertices[4, :] = bbox_center + np.array([-bbox_size_half[0], -bbox_size_half[1], bbox_size_half[2]])\n bbox_vertices[5, :] = bbox_center + np.array([bbox_size_half[0], -bbox_size_half[1], bbox_size_half[2]])\n bbox_vertices[6, :] = bbox_center + bbox_size_half\n bbox_vertices[7, :] = bbox_center + np.array([-bbox_size_half[0], bbox_size_half[1], bbox_size_half[2]])\n\n return bbox_vertices"
},
{
"identifier": "get_base_part_idx",
"path": "objects/dict_utils.py",
"snippet": "def get_base_part_idx(obj_dict):\n \"\"\"\n Get the index of the base part in the object dictionary\\n\n\n - obj_dict: the object dictionary\\n\n\n Return:\\n\n - base_part_idx: the index of the base part\n \"\"\"\n\n # Adjust for NAP's corner case\n base_part_ids = np.where([part[\"parent\"] == -1 for part in obj_dict[\"diffuse_tree\"]])[0]\n if len(base_part_ids) > 0:\n return base_part_ids[0].item()\n else:\n raise ValueError(\"No base part found\")"
}
] | import sys, os, json
import numpy as np
import quaternion
from iou import sampling_iou
from objects.dict_utils import get_bbox_vertices, get_base_part_idx
from tqdm import tqdm
from copy import deepcopy | 2,121 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
'''
AOR: Average Overlapping Ratio
- compute the vIoU between the sibling parts of the objects
'''
def transform_all_parts(bbox_vertices, obj_dict, joint_state, use_plucker=False, dry_run=True):
"""
Transform all parts of the object according to the joint state\n
- bbox_vertices: the bounding box vertices of the object in rest pose in the form: [[x0, y0, z0], [x1, y1, z1], ...]\n
- obj_dict: the object dictionary\n
- joint_state: the joint state in the range of [0, 1]\n
- use_plucker (optional): whether to use plucker coordinate to transform the parts\n
- dry_run (optional): if True, only return the transformation matrices without transforming the parts\n
Return:\n
- part_transformations: the transformation matrices used to transform the parts\n
"""
# Get a visit order of the parts such that children parts are visited before parents
part_visit_order = []
base_idx = get_base_part_idx(obj_dict)
indices_to_visit = [base_idx]
while len(indices_to_visit) > 0: # Breadth-first traversal
current_idx = indices_to_visit.pop(0)
part_visit_order.append(current_idx)
indices_to_visit += obj_dict["diffuse_tree"][current_idx]["children"]
part_visit_order.reverse()
part_transformations = [[] for _ in range(len(obj_dict["diffuse_tree"]))]
# Transform the parts in the visit order - children first, then parents
for i in part_visit_order:
part = obj_dict["diffuse_tree"][i]
joint = part["joint"]
children_idxs = part["children"]
# Store the transformation used to transform the part and its children
applied_tramsformation_matrix = np.eye(4)
applied_rotation_axis_origin = np.array([np.nan, np.nan, np.nan])
applied_transformation_type = "none"
if not use_plucker: # Direct translation and rotation
if joint["type"] == "prismatic":
# Translate the part and its children
translation = np.array(joint["axis"]["direction"]) * joint["range"][1] * joint_state
if not dry_run:
bbox_vertices[[i] + children_idxs] += translation
# Store the transformation used
applied_tramsformation_matrix[:3, 3] = translation
applied_transformation_type = "translation"
elif joint["type"] == "revolute" or joint["type"] == "continuous":
if joint["type"] == "revolute":
rotation_radian = np.radians(joint["range"][1] * joint_state)
else:
rotation_radian = np.radians(360 * joint_state)
# Prepare the rotation matrix via axis-angle representation and quaternion
rotation_axis_origin = np.array(joint["axis"]["origin"])
rotation_axis_direction = np.array(joint["axis"]["direction"]) / np.linalg.norm(joint["axis"]["direction"])
rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * rotation_axis_direction))
if not dry_run:
# Rotate the part and its children
vertices_to_rotate = (bbox_vertices[[i] + children_idxs] - rotation_axis_origin)
bbox_vertices[[i] + children_idxs] = np.matmul(rotation_matrix, vertices_to_rotate.transpose([0, 2, 1])).transpose([0, 2, 1]) + rotation_axis_origin
# Store the transformation used
applied_tramsformation_matrix[:3, :3] = rotation_matrix
applied_rotation_axis_origin = rotation_axis_origin
applied_transformation_type = "rotation"
else: # Translation and rotation together using the plucker coordinate as in NAP
plucker_direction = np.array(joint["axis"]["plucker"])[:3]
plucker_moment = np.array(joint["axis"]["plucker"])[3:]
translation_distance = joint["raw_ranges"][0][1] * joint_state
rotation_radian = np.radians(joint["raw_ranges"][1][1] * joint_state)
# Prepare the transformation matrix via plucker coordinate using equation (1) in NAP
transformation_matrix = np.eye(4)
rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * plucker_direction))
translation = (np.eye(3) - rotation_matrix) @ np.cross(plucker_direction, plucker_moment) + plucker_direction * translation_distance
transformation_matrix[:3, :3] = rotation_matrix
transformation_matrix[:3, 3] = translation
if not dry_run:
# Transform the part and its children via homogeneous coordinates
vertices_to_transform = np.concatenate([bbox_vertices[[i] + children_idxs], np.ones((len([i] + children_idxs), 8, 1))], axis=2)
bbox_vertices[[i] + children_idxs] = np.matmul(transformation_matrix, vertices_to_transform.transpose([0, 2, 1])).transpose([0, 2, 1])[:, :, :3]
# Store the transformation used
applied_tramsformation_matrix = transformation_matrix
applied_transformation_type = "plucker"
# Record the transformation used
if not applied_transformation_type == "none":
record = {
"type": applied_transformation_type,
"matrix": applied_tramsformation_matrix,
"rotation_axis_origin": applied_rotation_axis_origin
}
for idx in [i] + children_idxs:
part_transformations[idx].append(record)
return part_transformations
def AOR(tgt, num_states=20, transform_use_plucker=False):
tree = tgt["diffuse_tree"]
states = np.linspace(0, 1, num_states)
| sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
'''
AOR: Average Overlapping Ratio
- compute the vIoU between the sibling parts of the objects
'''
def transform_all_parts(bbox_vertices, obj_dict, joint_state, use_plucker=False, dry_run=True):
"""
Transform all parts of the object according to the joint state\n
- bbox_vertices: the bounding box vertices of the object in rest pose in the form: [[x0, y0, z0], [x1, y1, z1], ...]\n
- obj_dict: the object dictionary\n
- joint_state: the joint state in the range of [0, 1]\n
- use_plucker (optional): whether to use plucker coordinate to transform the parts\n
- dry_run (optional): if True, only return the transformation matrices without transforming the parts\n
Return:\n
- part_transformations: the transformation matrices used to transform the parts\n
"""
# Get a visit order of the parts such that children parts are visited before parents
part_visit_order = []
base_idx = get_base_part_idx(obj_dict)
indices_to_visit = [base_idx]
while len(indices_to_visit) > 0: # Breadth-first traversal
current_idx = indices_to_visit.pop(0)
part_visit_order.append(current_idx)
indices_to_visit += obj_dict["diffuse_tree"][current_idx]["children"]
part_visit_order.reverse()
part_transformations = [[] for _ in range(len(obj_dict["diffuse_tree"]))]
# Transform the parts in the visit order - children first, then parents
for i in part_visit_order:
part = obj_dict["diffuse_tree"][i]
joint = part["joint"]
children_idxs = part["children"]
# Store the transformation used to transform the part and its children
applied_tramsformation_matrix = np.eye(4)
applied_rotation_axis_origin = np.array([np.nan, np.nan, np.nan])
applied_transformation_type = "none"
if not use_plucker: # Direct translation and rotation
if joint["type"] == "prismatic":
# Translate the part and its children
translation = np.array(joint["axis"]["direction"]) * joint["range"][1] * joint_state
if not dry_run:
bbox_vertices[[i] + children_idxs] += translation
# Store the transformation used
applied_tramsformation_matrix[:3, 3] = translation
applied_transformation_type = "translation"
elif joint["type"] == "revolute" or joint["type"] == "continuous":
if joint["type"] == "revolute":
rotation_radian = np.radians(joint["range"][1] * joint_state)
else:
rotation_radian = np.radians(360 * joint_state)
# Prepare the rotation matrix via axis-angle representation and quaternion
rotation_axis_origin = np.array(joint["axis"]["origin"])
rotation_axis_direction = np.array(joint["axis"]["direction"]) / np.linalg.norm(joint["axis"]["direction"])
rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * rotation_axis_direction))
if not dry_run:
# Rotate the part and its children
vertices_to_rotate = (bbox_vertices[[i] + children_idxs] - rotation_axis_origin)
bbox_vertices[[i] + children_idxs] = np.matmul(rotation_matrix, vertices_to_rotate.transpose([0, 2, 1])).transpose([0, 2, 1]) + rotation_axis_origin
# Store the transformation used
applied_tramsformation_matrix[:3, :3] = rotation_matrix
applied_rotation_axis_origin = rotation_axis_origin
applied_transformation_type = "rotation"
else: # Translation and rotation together using the plucker coordinate as in NAP
plucker_direction = np.array(joint["axis"]["plucker"])[:3]
plucker_moment = np.array(joint["axis"]["plucker"])[3:]
translation_distance = joint["raw_ranges"][0][1] * joint_state
rotation_radian = np.radians(joint["raw_ranges"][1][1] * joint_state)
# Prepare the transformation matrix via plucker coordinate using equation (1) in NAP
transformation_matrix = np.eye(4)
rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * plucker_direction))
translation = (np.eye(3) - rotation_matrix) @ np.cross(plucker_direction, plucker_moment) + plucker_direction * translation_distance
transformation_matrix[:3, :3] = rotation_matrix
transformation_matrix[:3, 3] = translation
if not dry_run:
# Transform the part and its children via homogeneous coordinates
vertices_to_transform = np.concatenate([bbox_vertices[[i] + children_idxs], np.ones((len([i] + children_idxs), 8, 1))], axis=2)
bbox_vertices[[i] + children_idxs] = np.matmul(transformation_matrix, vertices_to_transform.transpose([0, 2, 1])).transpose([0, 2, 1])[:, :, :3]
# Store the transformation used
applied_tramsformation_matrix = transformation_matrix
applied_transformation_type = "plucker"
# Record the transformation used
if not applied_transformation_type == "none":
record = {
"type": applied_transformation_type,
"matrix": applied_tramsformation_matrix,
"rotation_axis_origin": applied_rotation_axis_origin
}
for idx in [i] + children_idxs:
part_transformations[idx].append(record)
return part_transformations
def AOR(tgt, num_states=20, transform_use_plucker=False):
tree = tgt["diffuse_tree"]
states = np.linspace(0, 1, num_states) | original_bbox_vertices = np.array([get_bbox_vertices(tgt, i) for i in range(len(tgt["diffuse_tree"]))]) | 0 | 2023-12-06 23:08:41+00:00 | 4k |
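As a reference for the revolute-joint branch of transform_all_parts above, here is a small self-contained sketch that rotates two bounding-box corners around a joint axis using the same numpy-quaternion calls; the axis, origin, joint range, and joint state are made-up example values.

import numpy as np
import quaternion  # numpy-quaternion, as used in the code above

corners = np.array([[0.4, 0.0, 0.0],
                    [0.4, 0.2, 0.0]])        # two example bbox vertices
axis_origin = np.array([0.5, 0.0, 0.0])      # a point on the rotation axis
axis_dir = np.array([0.0, 0.0, 1.0])         # unit-length axis direction
angle = np.radians(90.0 * 0.5)               # 90-degree range at joint_state = 0.5

R = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(angle * axis_dir))
rotated = (R @ (corners - axis_origin).T).T + axis_origin  # rotate about the axis origin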
duxiaodan/intrinsic-lora | sd_single_diode_pseudo_normal.py | [
{
"identifier": "plot_normal_map",
"path": "diode/diode.py",
"snippet": "def plot_normal_map(normal_map):\n normal_viz = normal_map[:, ::, :]\n\n #Normalize normals\n normi = np.where(np.sum(normal_viz,axis=2)!=0.)\n zero_mask = np.equal(np.sum(normal_viz, 2, keepdims=True), 0.).astype(np.float32)\n linalg_norm = np.sqrt((normal_viz[normi] * normal_viz[normi]).sum(axis=1,keepdims=True))\n normal_viz[normi] = normal_viz[normi]/(linalg_norm+1e-10)\n #Reverse color convention for both Y and Z axis to be consistent with Omnidatav2\n normal_viz[:,:,1:] = normal_viz[:,:,1:]*-1\n #Make masked area [-1,-1,-1]\n normal_viz = normal_viz + zero_mask*(-1)\n normal_viz = (normal_viz +1)/2.\n \n normal_mask = Image.fromarray(np.uint8(255*(normal_viz.sum(2)>0.)))\n normal_viz_img = Image.fromarray(np.uint8(normal_viz*255)).convert('RGB')\n return normal_viz_img, normal_mask"
},
{
"identifier": "check_and_tuplize_tokens",
"path": "diode/diode.py",
"snippet": "def check_and_tuplize_tokens(tokens, valid_tokens):\n if not isinstance(tokens, (tuple, list)):\n tokens = (tokens, )\n for split in tokens:\n assert split in valid_tokens\n return tokens"
},
{
"identifier": "enumerate_paths",
"path": "diode/diode.py",
"snippet": "def enumerate_paths(src):\n '''flatten out a nested dictionary into an iterable\n DIODE metadata is a nested dictionary;\n One could easily query a particular scene and scan, but sequentially\n enumerating files in a nested dictionary is troublesome. This function\n recursively traces out and aggregates the leaves of a tree.\n '''\n if isinstance(src, list):\n return src\n elif isinstance(src, dict):\n acc = []\n for k, v in src.items():\n _sub_paths = enumerate_paths(v)\n _sub_paths = list(map(lambda x: osp.join(k, x), _sub_paths))\n acc.append(_sub_paths)\n return list(chain.from_iterable(acc))\n else:\n raise ValueError('do not accept data type {}'.format(type(src)))"
},
{
"identifier": "_VALID_SPLITS",
"path": "diode/diode.py",
"snippet": "_VALID_SPLITS = ('train', 'val', 'test')"
},
{
"identifier": "_VALID_SCENE_TYPES",
"path": "diode/diode.py",
"snippet": "_VALID_SCENE_TYPES = ('indoors', 'outdoor')"
}
] | import argparse
import logging
import math
import os
import os.path as osp
import random
import shutil
import wandb
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import torch.utils.checkpoint
import transformers
import diffusers
import copy
import json
import datetime
import wandb
import xformers
import bitsandbytes as bnb
from pathlib import Path
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from torch.utils.data import Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
from PIL.ImageOps import exif_transpose
from diode.diode import (
plot_normal_map,
check_and_tuplize_tokens,
enumerate_paths,
_VALID_SPLITS,
_VALID_SCENE_TYPES
) | 2,585 | ):
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
bsz = latents.shape[0]
timesteps = torch.randint(max_timestep-1, max_timestep, (bsz,), device=latents.device)
timesteps = timesteps.long()
original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode()
original_image_embeds = original_image_embeds * vae.config.scaling_factor
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
model_pred = unet(original_image_embeds, timesteps, encoder_hidden_states).sample
images = vae.decode(model_pred / vae.config.scaling_factor, return_dict=False)[0]
return images
@torch.inference_mode()
def log_validation(
text_encoder,
tokenizer,
unet,
vae,
args,
accelerator,
test_batches,
train_batch,
weight_dtype,
epoch,
global_step
):
unwrapped_unet=accelerator.unwrap_model(unet)
unwrapped_text_encoder=accelerator.unwrap_model(text_encoder)
unwrapped_vae = accelerator.unwrap_model(vae)
max_timestep = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler").config.num_train_timesteps
val_test_images1 = []
val_test_images2 = []
val_train_images = []
test_batch1 = test_batches[0]
test_batch2 = test_batches[1]
with torch.cuda.amp.autocast():
images = val_inference_pipe(test_batch1, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images1.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(test_batch2, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images2.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(train_batch, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_train_images.extend([Image.fromarray(tensor2np(img)) for img in images])
concat_test_images1 = []
concat_test_images2 = []
concat_train_images = []
for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images1.append(output_img)
for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images2.append(output_img)
for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_train_images.append(output_img)
for tracker in accelerator.trackers:
if tracker.name == "wandb":
tracker.log(
{
"validation: training images": [
wandb.Image(image, )
for i, image in enumerate(concat_train_images)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 1": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images1)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 2": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images2)
],
},
step=global_step
)
torch.cuda.empty_cache()
return
class PSEUDODataset(Dataset):
def __init__(
self,
data_root,
pseudo_root,
tokenizer,
splits,
scene_types,
size=512,
center_crop=True,
num_train_imgs=None,
tokenizer_max_length=None,
empty_prompt = False,
unified_prompt = None,
):
self.data_root = Path(data_root)
self.pseudo_root = Path(pseudo_root)
| # coding=utf-8
# Intrinsic-LoRA
"""Intrinsic-LoRA Single UNet model for surface normal training"""
logger = get_logger(__name__, log_level="INFO")
def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
img_str = ""
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
        img_str += f"![img_{i}](./image_{i}.png)\n"
yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
"""
model_card = f"""
# LoRA text2image fine-tuning - {repo_id}
These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
{img_str}
"""
with open(os.path.join(repo_folder, "README.md"), "w") as f:
f.write(yaml + model_card)
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
if tokenizer_max_length is not None:
max_length = tokenizer_max_length
else:
max_length = tokenizer.model_max_length
text_inputs = tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=max_length,
return_tensors="pt",
)
return text_inputs
def tensor2np(tensor):
return (255*(tensor.cpu().permute(1,2,0).numpy()*0.5+0.5)).astype(np.uint8)
def listPILToTensor(listPILs):
size = listPILs[0].size[0]
image_transforms = transforms.Compose(
[
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
return torch.stack([image_transforms(p) for p in listPILs])
def visualization_routine(gt,im_1,im_2,im_3):
gt = tensor2np(gt)
im_1 = tensor2np(im_1)
im_2 = tensor2np(im_2)
im_3 = np.array(im_3)
return Image.fromarray(np.hstack((im_1,gt,im_2,im_3)))
@torch.inference_mode()
def val_inference_pipe(
batch,
vae,
unet,
text_encoder,
weight_dtype,
max_timestep
):
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
bsz = latents.shape[0]
timesteps = torch.randint(max_timestep-1, max_timestep, (bsz,), device=latents.device)
timesteps = timesteps.long()
original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode()
original_image_embeds = original_image_embeds * vae.config.scaling_factor
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
model_pred = unet(original_image_embeds, timesteps, encoder_hidden_states).sample
images = vae.decode(model_pred / vae.config.scaling_factor, return_dict=False)[0]
return images
@torch.inference_mode()
def log_validation(
text_encoder,
tokenizer,
unet,
vae,
args,
accelerator,
test_batches,
train_batch,
weight_dtype,
epoch,
global_step
):
unwrapped_unet=accelerator.unwrap_model(unet)
unwrapped_text_encoder=accelerator.unwrap_model(text_encoder)
unwrapped_vae = accelerator.unwrap_model(vae)
max_timestep = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler").config.num_train_timesteps
val_test_images1 = []
val_test_images2 = []
val_train_images = []
test_batch1 = test_batches[0]
test_batch2 = test_batches[1]
with torch.cuda.amp.autocast():
images = val_inference_pipe(test_batch1, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images1.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(test_batch2, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images2.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(train_batch, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_train_images.extend([Image.fromarray(tensor2np(img)) for img in images])
concat_test_images1 = []
concat_test_images2 = []
concat_train_images = []
for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images1.append(output_img)
for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images2.append(output_img)
for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_train_images.append(output_img)
for tracker in accelerator.trackers:
if tracker.name == "wandb":
tracker.log(
{
"validation: training images": [
wandb.Image(image, )
for i, image in enumerate(concat_train_images)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 1": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images1)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 2": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images2)
],
},
step=global_step
)
torch.cuda.empty_cache()
return
class PSEUDODataset(Dataset):
def __init__(
self,
data_root,
pseudo_root,
tokenizer,
splits,
scene_types,
size=512,
center_crop=True,
num_train_imgs=None,
tokenizer_max_length=None,
empty_prompt = False,
unified_prompt = None,
):
self.data_root = Path(data_root)
self.pseudo_root = Path(pseudo_root) | self.splits = check_and_tuplize_tokens( | 1 | 2023-12-08 16:34:44+00:00 | 4k |
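A toy, self-contained version of the normal-map visualization idea behind plot_normal_map above: per-pixel normals are unit-normalized and mapped from [-1, 1] to an 8-bit RGB image. The input array is random and purely illustrative.

import numpy as np
from PIL import Image

normals = np.random.randn(4, 4, 3).astype(np.float32)   # fake H x W x 3 normal map
norms = np.linalg.norm(normals, axis=2, keepdims=True)
unit = normals / (norms + 1e-10)                          # unit-length normals
rgb = np.uint8((unit + 1.0) / 2.0 * 255)                  # map [-1, 1] -> [0, 255]
normal_img = Image.fromarray(rgb, mode="RGB")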
modelscope/llmuses | llmuses/benchmarks/general_qa/general_qa_adapter.py | [
{
"identifier": "DataAdapter",
"path": "llmuses/benchmarks/data_adapter.py",
"snippet": "class DataAdapter(ABC):\n\n def __init__(self,\n subset_list: list,\n metric_list: list,\n few_shot_num: Optional[int] = 0,\n train_split: Optional[str] = None,\n eval_split: Optional[str] = None,\n **kwargs):\n \"\"\"\n Args:\n subset_list: list of subset names for the dataset.\n metric_list: list, the metric list to evaluate the model on specific benchmark.\n few_shot_num: int, number of few-shot examples. Default: 0\n train_split: str, usually for few-shot examples. e.g. 'train'\n eval_split: str, the target eval split name. e.g. 'test'\n \"\"\"\n self.subset_list = subset_list\n self.metric_list = metric_list\n self.few_shot_num = few_shot_num\n self.train_split = train_split\n self.eval_split = eval_split\n\n def load(self,\n dataset_name_or_path: str,\n subset_list: list = None,\n work_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n **kwargs) -> dict:\n \"\"\"\n Load the dataset. Remote and local datasets are supported.\n You can rewrite this method to support your own local dataset, just follow the format of the output.\n\n Returns: {'subset_name': {'train': train_dataset, 'test': test_dataset}}\n train_dataset, test_dataset: Iterable dataset, object each item of which is a dict.\n\n TODO: local data path to be supported.\n \"\"\"\n data_dict = {}\n\n split_list = [split for split in [self.train_split, self.eval_split] if split is not None]\n if len(split_list) == 0:\n logger.error(f'Got empty split list: {split_list}')\n\n subset_list = subset_list if subset_list is not None else self.subset_list\n for sub_name in subset_list:\n data_dict[sub_name] = {}\n # e.g. train: few-shot, test: target dataset to evaluate\n for split in split_list:\n dataset = Benchmark.load(dataset_name=dataset_name_or_path,\n subset=sub_name,\n split=split,\n hub='ModelScope',\n work_dir=work_dir,\n **kwargs)\n\n data_dict[sub_name].update({split: dataset})\n\n return data_dict\n\n def gen_prompts(self, data_dict: dict) -> dict:\n \"\"\"\n Generate dataset prompts from raw input, unify the prompt format for different datasets.\n\n Args:\n data_dict: Refer to the output of load method: llmuses.benchmarks.benchmark.Benchmark.load\n\n Returns:\n {'subset_name': [prompt_d_1, prompt_d_2, ...]}\n prompt_d_i (dict): refer to the output of gen_prompt method.\n\n e.g. 
train -- few-shot data, test -- target dataset to evaluate.\n \"\"\"\n res_dict: dict = {}\n\n if self.few_shot_num < 0:\n raise ValueError(f'Invalid shot_num: {self.few_shot_num} for few-shot evaluation.')\n\n logger.info(f'\\n** Use default settings: \\n'\n f'>few_shot_num: {self.few_shot_num}, '\n f'>few_shot_split: {self.train_split}, '\n f'>target_eval_split: {self.eval_split}')\n\n for sub_name, sub_data_dict in data_dict.items():\n few_shot_data = []\n if self.few_shot_num > 0:\n few_shot_data = self.get_fewshot_examples(\n [item for item in sub_data_dict[self.train_split]],\n self.few_shot_num)\n\n res_dict[sub_name] = []\n for sample_d in sub_data_dict[self.eval_split]:\n prompt_d = self.gen_prompt(input_d=sample_d, subset_name=sub_name, few_shot_list=few_shot_data)\n prompt_d[AnswerKeys.RAW_INPUT] = sample_d\n res_dict[sub_name].append(prompt_d)\n\n rnd = random.Random()\n rnd.seed(42)\n for k, v in res_dict.items():\n rnd.shuffle(v)\n\n return res_dict\n\n @abstractmethod\n def gen_prompt(self, *args, **kwargs) -> Any:\n \"\"\"\n Generate model prompt from raw input, unify the prompt format for different datasets.\n The input format is compatible with OpenAI Chat Completions APIs.\n Refer to: https://platform.openai.com/docs/guides/gpt/chat-completions-api\n\n Args:\n input_d (Any): The raw input. Depending on the dataset.\n\n Returns:\n For class MultiChoiceModelAdapter, the output format is:\n {'data': [full_prompt]}, -- full_prompt: str, the constructed prompt for each sample from dataset.\n\n For class ContinuationEvalModelAdapter, the output format is:\n {'data': ctx_continuation_pair_list, 'multi_choices': self.choices}\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_gold_answer(self, input_d: Any) -> Any:\n \"\"\"\n Parse the raw input labels (gold).\n\n Args:\n input_d: input raw data. Depending on the dataset.\n\n Returns:\n The parsed input. e.g. gold answer ... Depending on the dataset.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def parse_pred_result(self, result: Any, raw_input_d: dict = None) -> Any:\n \"\"\"\n Parse the predicted result and extract proper answer.\n\n Args:\n result: Predicted answer from the model. Usually a string for chat.\n raw_input_d: The raw input. Depending on the dataset.\n\n Returns:\n The parsed answer. Depending on the dataset. Usually a string for chat.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def match(self, gold: Any, pred: Any) -> Any:\n \"\"\"\n Match the gold answer and the predicted answer.\n\n Args:\n gold (Any): The golden answer. Usually a string for chat/multiple-choice-questions.\n e.g. 'A'\n pred (Any): The predicted answer. Usually a string for chat/multiple-choice-questions.\n e.g. 'B'\n\n Returns:\n The match result. Usually a score (float) for chat/multiple-choice-questions.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def compute_metric(self, review_res_list: list) -> Any:\n \"\"\"\n Compute evaluation result by specific metrics.\n\n Args:\n review_res_list: list, the review result list, each item of which is match result for gold and pred.\n\n Attributes:\n DataAdapter.metric_func_map: metric_name -> metric_func mapping,\n e.g. {'WeightedAverageAccuracy': weighted_average_acc}\n\n Returns:\n Metric results.\n \"\"\"\n raise NotImplementedError\n\n def gen_report(self, subset_score_map: dict) -> dict:\n \"\"\"\n Generate report for the evaluation results for all subsets.\n\n Args:\n subset_score_map: The subset-score map.\n e.g. 
{subset_name: (score, num)}\n\n Returns: The evaluation report. Note: should normalize the score by normalize_score method in utils.\n\n Here is a format example for ARC-Challenge:\n {\n \"name\":\"ARC-Challenge\",\n \"metric\":\"WeightedAverageAccuracy\",\n \"score\": 0.3389,\n \"category\":[\n {\n \"name\":\"DEFAULT\",\n \"score\": 0.3389,\n \"subset\":[\n {\n \"name\":\"ARC-Challenge\",\n \"score\": 0.3389\n },\n ]\n }\n ],\n \"total_num\":100\n }\n \"\"\"\n raise NotImplementedError\n\n def get_fewshot_examples(self, data_list: list, k: int):\n\n if k > len(data_list):\n k = len(data_list)\n\n return random.sample(data_list, k)"
},
{
"identifier": "bleu_ngram_one_sample",
"path": "llmuses/metrics/metrics.py",
"snippet": "def bleu_ngram_one_sample(predict, reference):\n \"\"\"\n Calculate BLEU-1, BLEU-2, BLEU-3, and BLEU-4 scores\n\n Args:\n items: [(ref, pred)]\n \n Returns:\n {\n 'bleu-1': 0.8,\n 'bleu-2': 0.45,\n 'bleu-3': 0.0,\n 'bleu-4': 0.0\n }\n\n \"\"\"\n def is_contains_chinese(strs):\n for _char in strs:\n if '\\u4e00' <= _char <= '\\u9fa5':\n return True\n return False\n\n predict = list(jieba.cut(predict)) if is_contains_chinese(predict) else word_tokenize(predict)\n reference = [list(jieba.cut(reference))] if is_contains_chinese(reference) else [word_tokenize(reference)]\n\n result = dict()\n result['bleu-1'] = sentence_bleu(reference, predict, weights=(1, 0, 0, 0))\n result['bleu-2'] = sentence_bleu(reference, predict, weights=(0, 1, 0, 0))\n result['bleu-3'] = sentence_bleu(reference, predict, weights=(0, 0, 1, 0))\n result['bleu-4'] = sentence_bleu(reference, predict, weights=(0, 0, 0, 1))\n\n return result"
},
{
"identifier": "weighted_mean",
"path": "llmuses/metrics/metrics.py",
"snippet": "def weighted_mean(items) -> float:\n # e.g. [(0,1), (0.5,1), (1,1)]\n a, b = zip(*items)\n return sum(a) / sum(b)"
},
{
"identifier": "compute_rouge_score_one_sample_zh",
"path": "llmuses/metrics/rouge_metric.py",
"snippet": "def compute_rouge_score_one_sample_zh(predict, reference):\n result = dict()\n for p, r in zip(predict, reference):\n p = ' '.join(jieba.cut(p)) if is_contains_chinese(p) else p\n r = ' '.join(jieba.cut(r)) if is_contains_chinese(r) else r\n\n score = zh_scorer.get_scores(p, r)[0]\n result['rouge-1-r'] = score['rouge-1']['r']\n result['rouge-1-p'] = score['rouge-1']['p']\n result['rouge-1-f'] = score['rouge-1']['f']\n result['rouge-2-r'] = score['rouge-2']['r']\n result['rouge-2-p'] = score['rouge-2']['p']\n result['rouge-2-f'] = score['rouge-2']['f']\n result['rouge-l-r'] = score['rouge-l']['r']\n result['rouge-l-p'] = score['rouge-l']['p']\n result['rouge-l-f'] = score['rouge-l']['f']\n \n return result"
},
{
"identifier": "get_logger",
"path": "llmuses/utils/logger.py",
"snippet": "def get_logger(log_file: Optional[str] = None,\n log_level: int = logging.INFO,\n file_mode: str = 'w'):\n \"\"\" Get logging logger\n\n Args:\n log_file: Log filename, if specified, file handler will be added to\n logger\n log_level: Logging level.\n file_mode: Specifies the mode to open the file, if filename is\n specified (if filemode is unspecified, it defaults to 'w').\n \"\"\"\n\n logger_name = __name__.split('.')[0]\n logger = logging.getLogger(logger_name)\n\n if logger_name in init_loggers:\n add_file_handler_if_needed(logger, log_file, file_mode, log_level)\n return logger\n\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if log_file is not None:\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n logger.setLevel(log_level)\n\n init_loggers[logger_name] = True\n\n return logger"
}
] | from llmuses.benchmarks.data_adapter import DataAdapter
from llmuses.metrics.metrics import bleu_ngram_one_sample, weighted_mean
from llmuses.metrics.rouge_metric import compute_rouge_score_one_sample_zh
from llmuses.utils.logger import get_logger
from typing import Any, Optional
from collections import defaultdict
import json | 3,162 | # Copyright (c) Alibaba, Inc. and its affiliates.
logger = get_logger()
DATASET_ID = 'general_qa'
SUBSET_LIST = ['default']
class GeneralQAAdapter(DataAdapter):
def __init__(self,
subset_list: list = None,
metric_list: list = None,
train_split: str = 'train',
eval_split: str = 'test',
**kwargs):
if subset_list is None:
subset_list = SUBSET_LIST
if metric_list is None:
| # Copyright (c) Alibaba, Inc. and its affiliates.
logger = get_logger()
DATASET_ID = 'general_qa'
SUBSET_LIST = ['default']
class GeneralQAAdapter(DataAdapter):
def __init__(self,
subset_list: list = None,
metric_list: list = None,
train_split: str = 'train',
eval_split: str = 'test',
**kwargs):
if subset_list is None:
subset_list = SUBSET_LIST
if metric_list is None: | metric_list = [{'name': 'WeightedAverageBLEU', 'object': weighted_mean}] | 2 | 2023-12-07 06:10:49+00:00 | 4k |
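To make the per-sample BLEU computation wrapped by bleu_ngram_one_sample concrete, here is a minimal NLTK-only sketch with toy sentences; whitespace tokenization is used so the example needs nothing beyond NLTK itself.

from nltk.translate.bleu_score import sentence_bleu

predict = "the quick brown fox".split()
reference = ["the quick brown fox jumps".split()]          # a single reference

scores = {
    "bleu-1": sentence_bleu(reference, predict, weights=(1, 0, 0, 0)),
    "bleu-2": sentence_bleu(reference, predict, weights=(0, 1, 0, 0)),
}
print(scores)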
AsuradaYuci/TF-CLIP | model/make_model_clipreid.py | [
{
"identifier": "SimpleTokenizer",
"path": "model/clip/simple_tokenizer.py",
"snippet": "class SimpleTokenizer(object):\n def __init__(self, bpe_path: str = default_bpe()):\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split('\\n')\n merges = merges[1:49152-256-2+1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v+'</w>' for v in vocab]\n for merge in merges:\n vocab.append(''.join(merge))\n vocab.extend(['<|startoftext|>', '<|endoftext|>'])\n self.encoder = dict(zip(vocab, range(len(vocab))))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}\n self.pat = re.compile(r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\", re.IGNORECASE)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + ( token[-1] + '</w>',)\n pairs = get_pairs(word)\n\n if not pairs:\n return token+'</w>'\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))\n bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def decode(self, tokens):\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=\"replace\").replace('</w>', ' ')\n return text"
},
{
"identifier": "QuickGELU",
"path": "model/clip/model.py",
"snippet": "class QuickGELU(nn.Module):\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)"
},
{
"identifier": "LayerNorm",
"path": "model/clip/model.py",
"snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n ret = super().forward(x.type(torch.float32))\n return ret.type(orig_type)"
},
{
"identifier": "visual_prompt",
"path": "model/Visual_Prompt.py",
"snippet": "class visual_prompt(nn.Module):\n def __init__(self, sim_head, T):\n super().__init__()\n self.sim_header = sim_head # Transf\n self.T = T\n assert sim_head in [\"meanP\", \"LSTM\", \"Transf\", \"Conv_1D\", \"Transf_cls\"]\n\n if self.sim_header == \"LSTM\" or self.sim_header == \"Transf\" or self.sim_header == \"Transf_cls\" or self.sim_header == \"Conv_1D\" :\n embed_dim = 512 # 512\n\n context_length = 77 # 77\n # vocab_size = clip_state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = 512\n transformer_heads = transformer_width // 64\n\n self.frame_position_embeddings = nn.Embedding(context_length, embed_dim) # 77, 512\n if self.sim_header == \"Transf\" :\n self.transformer = TemporalTransformer(width=embed_dim, layers=6, heads=transformer_heads)\n print('layer=6')\n if self.sim_header == \"LSTM\":\n self.lstm_visual = nn.LSTM(input_size=embed_dim, hidden_size=embed_dim,\n batch_first=True, bidirectional=False, num_layers=1)\n\n self.apply(self.init_weights)\n\n if self.sim_header == \"Transf_cls\":\n self.transformer = TAggregate(clip_length=self.T, embed_dim=embed_dim, n_layers=6)\n\n if self.sim_header == 'Conv_1D' :\n self.shift = nn.Conv1d(embed_dim, embed_dim, 3, padding=1, groups=embed_dim, bias=False)\n weight = torch.zeros(embed_dim, 1, 3)\n weight[:embed_dim // 4, 0, 0] = 1.0\n weight[embed_dim // 4:embed_dim // 4 + embed_dim // 2, 0, 1] = 1.0\n weight[-embed_dim // 4:, 0, 2] = 1.0\n self.shift.weight = nn.Parameter(weight)\n\n def init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=0.02)\n elif isinstance(module, LayerNorm):\n if 'beta' in dir(module) and 'gamma' in dir(module):\n module.beta.data.zero_()\n module.gamma.data.fill_(1.0)\n else:\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n def forward(self, x):\n b, t, c = x.size() # 2, 8, 512\n x = x.contiguous()\n if self.sim_header == \"meanP\":\n pass\n elif self.sim_header == 'Conv_1D':\n x_original = x\n x = x.view(-1, c, t) # torch.Size([2, 8, 512])\n x = self.shift(x.float())\n x = x.permute(0, 2, 1)\n x = x.type(x_original.dtype) + x_original\n\n elif self.sim_header == \"Transf\":\n x_original = x # # torch.Size([2, 8, 512])\n seq_length = t # t=8\n position_ids = torch.arange(seq_length, dtype=torch.long, device=x.device) # tensor([0, 1, 2, 3, 4, 5, 6, 7], device='cuda:1')\n position_ids = position_ids.unsqueeze(0).expand(x.size(0), -1) # torch.Size([2, 8])\n frame_position_embeddings = self.frame_position_embeddings(position_ids) # torch.Size([2, 8, 512])\n x = x + frame_position_embeddings\n\n x = x.permute(1, 0, 2) # NLD -> LND # torch.Size([8, 2, 512])\n x = self.transformer(x) # torch.Size([8, 2, 512])\n x = x.permute(1, 0, 2) # LND -> NLD # torch.Size([2, 8, 512])\n x = x.type(x_original.dtype) + x_original\n\n elif self.sim_header == \"LSTM\":\n x_original = x\n x, _ = self.lstm_visual(x.float())\n self.lstm_visual.flatten_parameters()\n x = torch.cat((x, x_original[:, x.size(1):, ...].contiguous()), dim=1)\n x = x.type(x_original.dtype) + x_original\n elif self.sim_header == \"Transf_cls\":\n x_original = x\n return self.transformer(x).type(x_original.dtype)\n\n else:\n raise ValueError('Unknown optimizer: 
{}'.format(self.sim_header))\n return x.mean(dim=1, keepdim=False)"
},
{
"identifier": "clip",
"path": "model/clip/clip.py",
"snippet": " BICUBIC = InterpolationMode.BICUBIC\n BICUBIC = Image.BICUBIC\n_MODELS = {\n \"RN50\": \"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt\",\n \"RN101\": \"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt\",\n \"RN50x4\": \"https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt\",\n \"RN50x16\": \"https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt\",\n \"ViT-B-32\": \"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt\",\n \"ViT-B-16\": \"https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt\",\n}\ndef _download(url: str, root: str = os.path.expanduser(\"~/.cache/clip\")):\ndef _transform(n_px):\ndef available_models() -> List[str]:\ndef load(name: str, device: Union[str, torch.device] = \"cuda\" if torch.cuda.is_available() else \"cpu\", jit=False):\n def patch_device(module):\n def patch_float(module):\ndef tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:"
}
] | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from collections import OrderedDict
from .clip.simple_tokenizer import SimpleTokenizer as _Tokenizer
from .clip.model import QuickGELU, LayerNorm
from .Visual_Prompt import visual_prompt
from .clip import clip | 3,090 | _tokenizer = _Tokenizer()
# from .TAT import TemporalAttentionTransformer
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
def load_clip_to_cpu(backbone_name, h_resolution, w_resolution, vision_stride_size):
| _tokenizer = _Tokenizer()
# from .TAT import TemporalAttentionTransformer
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
def load_clip_to_cpu(backbone_name, h_resolution, w_resolution, vision_stride_size): | url = clip._MODELS[backbone_name] | 4 | 2023-12-11 04:03:46+00:00 | 4k |
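A short usage sketch for the two initializers defined above, assuming weights_init_kaiming and weights_init_classifier are in scope; the feature dimension and identity count are placeholder values.

import torch.nn as nn

bottleneck = nn.BatchNorm1d(512)                 # placeholder feature dimension
classifier = nn.Linear(512, 751, bias=False)     # placeholder number of identities

bottleneck.apply(weights_init_kaiming)           # BatchNorm branch: weight=1, bias=0
classifier.apply(weights_init_classifier)        # Linear branch: normal(std=0.001)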
MarilynKeller/aitviewer-skel | aitviewer/remote/viewer.py | [
{
"identifier": "Message",
"path": "aitviewer/remote/message.py",
"snippet": "class Message(enum.Enum):\n \"\"\"Enumeration for the type of message.\"\"\"\n\n # Messages used to create nodes on the remote viewer.\n NODE = 1\n MESHES = 2\n SPHERES = 3\n LINES = 4\n ARROWS = 5\n RIGID_BODIES = 6\n SMPL = 10\n\n # Messages used to modify existing nodes on the remote viewer.\n DELETE = 100\n ADD_FRAMES = 101\n UPDATE_FRAMES = 102\n REMOVE_FRAMES = 103\n\n # Built-in uitilities to interact with the viewer.\n SET_FRAME = 200\n NEXT_FRAME = 201\n PREVIOUS_FRAME = 202\n\n # All values greater than or equal to Message.USER_MESSAGE are not used internally by the viewer\n # and can be safely used to send custom messages.\n USER_MESSAGE = 10000"
},
{
"identifier": "make_message",
"path": "aitviewer/remote/message.py",
"snippet": "def make_message(type, uid, args, kwargs):\n msg = {\n \"type\": type,\n \"uid\": uid,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n return msg"
}
] | import asyncio
import pickle
import queue
import subprocess
import threading
import websockets
from typing import Callable
from .message import Message, make_message | 1,709 | v = cls(**kwargs)
v.process = process
return v
def _entry(self, url):
# Entry point of the client thread.
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self._async_entry(url))
async def _async_entry(self, url):
# Async entry point of the client thread.
# Attempt to connect until 'self.timeout' seconds passed.
start_time = self.loop.time()
try:
while self.loop.time() < start_time + self.timeout:
try:
self.websocket = await websockets.connect(url, max_size=None)
self.connected = True
break
except Exception as e:
pass
finally:
# Release the semaphore to let the main thread continue after
# attempting to connect. The main thread will read the
            # self.connected variable to know if we succeeded at connecting.
self.semaphore.release()
# Exit the client thread if we failed to connect.
if not self.connected:
return
# Create a queue for incoming messages to the main thread.
self.recv_queue = queue.Queue()
# Message loop.
try:
# This loop is exited whenever the connection is dropped
            # which causes an exception to be raised.
async for message in self.websocket:
data = pickle.loads(message)
                # Enqueue data for the main thread to process.
self.recv_queue.put_nowait(data)
except Exception as e:
print(f"Message loop exception: {e}")
# Mark the connection as closed.
self.connected = False
def get_message(self, block=True):
"""
Returns the next message received by the remote viewer.
:param block: if True this function blocks until a message is received, otherwise it returns immediately.
:return: if block is True returns the next message or None if the connection has been closed.
if block is False returns the next message or None if there are no messages.
"""
if self.connected:
if block:
while self.connected:
try:
return self.recv_queue.get(timeout=0.1)
except queue.Empty:
pass
else:
if not self.recv_queue.empty():
return self.recv_queue.get_nowait()
return None
def process_messages(self, handler: Callable[["RemoteViewer", object], None], block=True):
"""
Processes messages in a loop calling 'handler' for each message.
:param block: if True this function blocks until the connection is closed, otherwise it returns
after all messages received so far have been processed.
:return: if block is True always returns False when the connection has been closed.
if block is False returns True if the connection is still open or False if the connection
has been closed.
"""
while True:
msg = self.get_message(block)
if msg is None:
if block:
return False
else:
return self.connected
handler(self, msg)
async def _async_send(self, data):
await self.websocket.send(data)
def send(self, data):
try:
if self.connected:
# Send a message by adding a send coroutine to the thread's loop and wait for it to complete.
asyncio.run_coroutine_threadsafe(self._async_send(data), self.loop).result()
except Exception as e:
print(f"Send exception: {e}")
def send_message(self, type, uid=None, *args, **kwargs):
"""
Send a message to the viewer. See Viewer.process_message()
for information about how these parameters are interpreted
by the viewer.
"""
msg = make_message(type, uid, args, kwargs)
data = pickle.dumps(msg)
self.send(data)
def set_frame(self, frame: int):
"""
Set the current active frame of the remote viewer.
:param frame: an integer representing the id of the frame.
"""
| # Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class RemoteViewer:
def __init__(self, host="localhost", port=8417, timeout=10, verbose=True):
"""
Initializer.
:param host: the IP address of a host to connect to as a string.
:param port: the TCP port to connect to.
:param timeout: a timeout in seconds for attempting to connect to the viewer.
:param verbose: if True print info messages.
"""
url = f"ws://{host}:{port}"
if verbose:
print(f"Connecting to remote viewer at {url}")
self.timeout = timeout
self.connected = False
# Semaphore used to wait for the connection to be setup by the client thread.
self.semaphore = threading.Semaphore(0)
# Create a thread for running the websocket client async loop.
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(target=self._entry, args=(url,), daemon=True)
self.thread.start()
# Wait for the connection to be setup.
self.semaphore.acquire()
if verbose:
if self.connected:
print("Connected")
else:
print(f"Failed to connect")
self.process: subprocess.Popen = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close_connection()
@classmethod
def create_new_process(cls, args=None, **kwargs):
"""
Open a Viewer in a new process and return a RemoteViewer connected to it.
:param args: This parameter can be used to specify an argument or
a list of arguments that is used to create the new process.
e.g: args = ["path/to/script.py", "arg1", "arg2"] will invoke the following command:
python path/to/script.py arg1 arg2
"""
# If host is None create a new viewer in a separate process.
if args is None:
popen_args = ["python", "-m", "aitviewer.server"]
else:
if isinstance(args, list):
popen_args = ["python"] + args
else:
popen_args = ["python", str(args)]
# Create the viewer process.
process = subprocess.Popen(
popen_args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
# Create a remote viewer connected to the child process.
v = cls(**kwargs)
v.process = process
return v
def _entry(self, url):
# Entry point of the client thread.
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self._async_entry(url))
async def _async_entry(self, url):
# Async entry point of the client thread.
# Attempt to connect until 'self.timeout' seconds passed.
start_time = self.loop.time()
try:
while self.loop.time() < start_time + self.timeout:
try:
self.websocket = await websockets.connect(url, max_size=None)
self.connected = True
break
except Exception as e:
pass
finally:
# Release the semaphore to let the main thread continue after
# attempting to connect. The main thread will read the
            # self.connected variable to know if we succeeded at connecting.
self.semaphore.release()
# Exit the client thread if we failed to connect.
if not self.connected:
return
# Create a queue for incoming messages to the main thread.
self.recv_queue = queue.Queue()
# Message loop.
try:
# This loop is exited whenever the connection is dropped
            # which causes an exception to be raised.
async for message in self.websocket:
data = pickle.loads(message)
                # Enqueue data for the main thread to process.
self.recv_queue.put_nowait(data)
except Exception as e:
print(f"Message loop exception: {e}")
# Mark the connection as closed.
self.connected = False
def get_message(self, block=True):
"""
Returns the next message received by the remote viewer.
:param block: if True this function blocks until a message is received, otherwise it returns immediately.
:return: if block is True returns the next message or None if the connection has been closed.
if block is False returns the next message or None if there are no messages.
"""
if self.connected:
if block:
while self.connected:
try:
return self.recv_queue.get(timeout=0.1)
except queue.Empty:
pass
else:
if not self.recv_queue.empty():
return self.recv_queue.get_nowait()
return None
def process_messages(self, handler: Callable[["RemoteViewer", object], None], block=True):
"""
Processes messages in a loop calling 'handler' for each message.
:param block: if True this function blocks until the connection is closed, otherwise it returns
after all messages received so far have been processed.
:return: if block is True always returns False when the connection has been closed.
if block is False returns True if the connection is still open or False if the connection
has been closed.
"""
while True:
msg = self.get_message(block)
if msg is None:
if block:
return False
else:
return self.connected
handler(self, msg)
async def _async_send(self, data):
await self.websocket.send(data)
def send(self, data):
try:
if self.connected:
# Send a message by adding a send coroutine to the thread's loop and wait for it to complete.
asyncio.run_coroutine_threadsafe(self._async_send(data), self.loop).result()
except Exception as e:
print(f"Send exception: {e}")
def send_message(self, type, uid=None, *args, **kwargs):
"""
Send a message to the viewer. See Viewer.process_message()
for information about how these parameters are interpreted
by the viewer.
"""
msg = make_message(type, uid, args, kwargs)
data = pickle.dumps(msg)
self.send(data)
def set_frame(self, frame: int):
"""
Set the current active frame of the remote viewer.
:param frame: an integer representing the id of the frame.
""" | self.send_message(Message.SET_FRAME, None, frame) | 0 | 2023-12-07 16:13:50+00:00 | 4k |
nexB/dejacode | organization/urls.py | [
{
"identifier": "DataspacedCreateView",
"path": "dje/views.py",
"snippet": "class DataspacedCreateView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n SuccessMessageMixin,\n DataspacedModelFormMixin,\n CreateView,\n):\n template_name = \"object_form.html\"\n\n def get_success_message(self, cleaned_data):\n if self.object:\n model_name = self.object._meta.verbose_name.title()\n return f'{model_name} \"{self.object}\" was successfully created.'"
},
{
"identifier": "DataspacedDeleteView",
"path": "dje/views.py",
"snippet": "class DataspacedDeleteView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n GetDataspacedObjectMixin,\n DataspacedModelFormMixin,\n DeleteView,\n):\n template_name = \"object_confirm_delete.html\"\n\n def get_deletion_status(self):\n from dje.admin import dejacode_site\n\n objs = [self.object]\n __, __, perms_needed, protected = get_deleted_objects(objs, self.request, dejacode_site)\n\n return perms_needed, protected\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n perms_needed, protected = self.get_deletion_status()\n context.update(\n {\n \"opts\": self.object._meta,\n \"perms_needed\": perms_needed,\n \"protected\": protected,\n }\n )\n\n return context\n\n def form_valid(self, form):\n \"\"\"Add success message and History entry.\"\"\"\n self.object = self.get_object()\n perms_needed, protected = self.get_deletion_status()\n if perms_needed or protected:\n raise Http404(\"Permission denied\")\n\n response = super().form_valid(form)\n\n model_name = self.object._meta.verbose_name.title()\n message = f'{model_name} \"{self.object}\" was successfully deleted.'\n messages.success(self.request, message)\n\n History.log_deletion(self.request.user, self.object)\n\n return response\n\n def get_success_url(self):\n if self.success_url:\n return super().get_success_url()\n\n opts = self.object._meta\n return reverse(f\"{opts.app_label}:{opts.model_name}_list\")"
},
{
"identifier": "DataspacedUpdateView",
"path": "dje/views.py",
"snippet": "class DataspacedUpdateView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n GetDataspacedObjectMixin,\n DataspacedModelFormMixin,\n UpdateView,\n):\n template_name = \"object_form.html\"\n\n def form_valid(self, form):\n if form.has_changed():\n model_name = self.model._meta.verbose_name.title()\n save_as_new = getattr(form, \"save_as_new\", None)\n action = \"cloned\" if save_as_new else \"updated\"\n msg = f'{model_name} \"{self.object}\" was successfully {action}.'\n messages.success(self.request, msg)\n return super().form_valid(form)\n else:\n messages.warning(self.request, \"No fields changed.\")\n return redirect(self.get_success_url())"
},
{
"identifier": "OwnerForm",
"path": "organization/forms.py",
"snippet": "class OwnerForm(DataspacedModelForm):\n save_as = True\n\n class Meta:\n model = Owner\n fields = [\n \"name\",\n \"homepage_url\",\n \"contact_info\",\n \"notes\",\n \"alias\",\n \"type\",\n ]\n widgets = {\n \"notes\": forms.Textarea(attrs={\"rows\": 2}),\n }\n\n @property\n def helper(self):\n helper = super().helper\n\n helper.layout = Layout(\n Fieldset(\n None,\n Group(\"name\", \"alias\", \"contact_info\"),\n Group(\"homepage_url\", \"type\"),\n \"notes\",\n Submit(\"submit\", self.submit_label, css_class=\"btn-success\"),\n self.save_as_new_submit,\n ),\n )\n\n return helper"
},
{
"identifier": "Owner",
"path": "organization/models.py",
"snippet": "class Owner(\n ExternalReferenceMixin,\n HistoryFieldsMixin,\n ParentChildModelMixin,\n DataspacedModel,\n):\n name = models.CharField(\n db_index=True,\n max_length=70,\n help_text=_(\n \"The unique user-maintained name of the author, custodian, or provider of \"\n \"one or more software objects (licenses, components, products).\"\n ),\n )\n\n homepage_url = models.URLField(\n _(\"Homepage URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"The homepage URL of the owner.\"),\n )\n\n contact_info = models.CharField(\n _(\"contact information\"),\n max_length=500,\n blank=True,\n help_text=_(\n \"Information, frequently a dedicated email address, about \"\n \"contacting an owner for license clarifications and permissions.\"\n ),\n )\n\n notes = models.TextField(blank=True, help_text=_(\"Extended notes about an owner.\"))\n\n alias = models.CharField(\n db_index=True,\n max_length=500,\n blank=True,\n help_text=_(\"Alternative spellings of the name of the owner as a comma-separated list.\"),\n )\n\n OWNER_TYPE_CHOICES = (\n (\n \"Organization\",\n _(\"Organization: an ongoing entity that provides software or promotes standards.\"),\n ),\n (\"Person\", _(\"Person: an individual that provides software or promotes standards.\")),\n (\"Project\", _(\"Project: a dynamic collection of contributors to a software project.\")),\n )\n\n type = models.CharField(\n max_length=20,\n default=\"Organization\",\n choices=OWNER_TYPE_CHOICES,\n db_index=True,\n help_text=_(\n \"An owner type differentiates individuals, ongoing businesses, and \"\n \"dynamic organizations (such as software projects). \"\n \"An owner of any type can be associated with a license, component, or \"\n \"product. An owner can also be the parent of any other owner.\"\n ),\n )\n\n # Use choices database values instead of the `get_FIELD_display`, in reporting.\n type.report_with_db_value = True\n\n # This reference all the Owners associated with self through a\n # Subowner relation where self is the parents.\n # Only the children are copied on ParentChild relation type.\n children = models.ManyToManyField(\n to=\"self\",\n through=\"Subowner\",\n symmetrical=False,\n )\n\n class Meta:\n unique_together = (\n (\"dataspace\", \"name\"),\n (\"dataspace\", \"uuid\"),\n )\n ordering = [\"name\"]\n\n def __str__(self):\n return self.name\n\n @property\n def urn(self):\n return urn.build(\"owner\", name=self.name)\n\n def get_url(self, name, params=None):\n if not params:\n params = [self.dataspace.name, quote_plus(self.name)]\n return super().get_url(name, params)\n\n def get_absolute_url(self):\n return self.get_url(\"details\")\n\n @property\n def details_url(self):\n return self.get_absolute_url()\n\n def get_change_url(self):\n return self.get_url(\"change\")\n\n def get_delete_url(self):\n return self.get_url(\"delete\")\n\n @staticmethod\n def get_extra_relational_fields():\n return [\"external_references\"]\n\n @property\n def case_insensitive_unique_on(self):\n return [\"name\"]\n\n def get_alias_list(self):\n return self.alias.replace(\", \", \",\").split(\",\")\n\n def as_spdx(self):\n spdx_type = \"Person\" if self.type == \"Person\" else \"Organization\"\n return f\"{spdx_type}: {self.name}\""
},
{
"identifier": "OwnerDetailsView",
"path": "organization/views.py",
"snippet": "class OwnerDetailsView(\n AcceptAnonymousMixin,\n ObjectDetailsView,\n):\n model = Owner\n slug_url_kwarg = \"name\"\n template_name = \"organization/owner_details.html\"\n include_reference_dataspace = True\n show_previous_and_next_object_links = True\n tabset = {\n \"essentials\": {\n \"fields\": [\n \"name\",\n \"homepage_url\",\n \"type\",\n \"contact_info\",\n \"alias\",\n \"notes\",\n \"urn\",\n \"dataspace\",\n ],\n },\n \"licenses\": {\n \"fields\": [\n \"licenses\",\n ],\n },\n \"components\": {\n \"fields\": [\n \"components\",\n ],\n },\n \"hierarchy\": {},\n \"external_references\": {\n \"fields\": [\n \"external_references\",\n ],\n },\n \"history\": {\n \"fields\": [\n \"created_date\",\n \"created_by\",\n \"last_modified_date\",\n \"last_modified_by\",\n ],\n },\n }\n\n def get_queryset(self):\n license_qs = License.objects.select_related(\"license_profile\", \"category\", \"usage_policy\")\n\n return (\n super()\n .get_queryset()\n .prefetch_related(\n \"component_set__packages\",\n \"component_set__licenses__usage_policy\",\n \"external_references\",\n Prefetch(\"license_set\", queryset=license_qs),\n )\n )\n\n def tab_essentials(self):\n tab_fields = [\n TabField(\"name\"),\n TabField(\"homepage_url\", value_func=urlize_target_blank),\n TabField(\"type\"),\n TabField(\"contact_info\", value_func=urlize_target_blank),\n TabField(\"alias\"),\n TabField(\"notes\"),\n (_(\"URN\"), self.object.urn_link, URN_HELP_TEXT, None),\n TabField(\"dataspace\"),\n ]\n\n return {\"fields\": self.get_tab_fields(tab_fields)}\n\n def tab_licenses(self):\n license_qs = self.object.license_set.all()\n if license_qs:\n return {\n \"fields\": [\n (None, license_qs, None, \"organization/tabs/tab_license.html\"),\n ]\n }\n\n def tab_hierarchy(self):\n hierarchy = self.get_owner_hierarchy(self.object)\n if hierarchy:\n return {\"fields\": [], \"extra\": hierarchy}"
},
{
"identifier": "OwnerListView",
"path": "organization/views.py",
"snippet": "class OwnerListView(\n AcceptAnonymousMixin,\n DataspacedFilterView,\n):\n model = Owner\n filterset_class = OwnerFilterSet\n template_list_table = \"organization/includes/owner_list_table.html\"\n include_reference_dataspace = True\n put_results_in_session = True\n table_headers = (\n Header(\"name\", \"Owner name\"),\n Header(\"licenses\", \"Licenses\", OWNER_LICENSES_HELP, \"license\"),\n Header(\"components\", \"Components\", OWNER_COMPONENTS_HELP, \"component\"),\n Header(\"homepage_url\"),\n Header(\"type\", \"Type\", filter=\"type\"),\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .only(\n \"name\",\n \"alias\",\n \"homepage_url\",\n \"type\",\n \"dataspace\",\n )\n .prefetch_related(\n \"license_set__usage_policy\",\n \"component_set__usage_policy\",\n )\n .annotate(\n license_count=Count(\"license\"),\n component_count=Count(\"component\"),\n has_license_and_component=Case(\n When(license_count__gt=0, component_count__gt=0, then=Value(0)),\n default=Value(1),\n output_field=IntegerField(),\n ),\n )\n .order_by(\"has_license_and_component\", \"name\")\n )"
}
] | from django.urls import path
from dje.views import DataspacedCreateView
from dje.views import DataspacedDeleteView
from dje.views import DataspacedUpdateView
from organization.forms import OwnerForm
from organization.models import Owner
from organization.views import OwnerDetailsView
from organization.views import OwnerListView | 3,019 | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#
urlpatterns = [
path(
"<str:dataspace>/<str:name>/change/",
DataspacedUpdateView.as_view(
model=Owner,
form_class=OwnerForm,
slug_url_kwarg="name",
permission_required="organization.change_owner",
),
name="owner_change",
),
path(
"<str:dataspace>/<str:name>/delete/",
DataspacedDeleteView.as_view(
model=Owner,
slug_url_kwarg="name",
permission_required="organization.delete_owner",
),
name="owner_delete",
),
path(
"<str:dataspace>/<str:name>/",
OwnerDetailsView.as_view(),
name="owner_details",
),
path(
"add/",
| #
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#
urlpatterns = [
path(
"<str:dataspace>/<str:name>/change/",
DataspacedUpdateView.as_view(
model=Owner,
form_class=OwnerForm,
slug_url_kwarg="name",
permission_required="organization.change_owner",
),
name="owner_change",
),
path(
"<str:dataspace>/<str:name>/delete/",
DataspacedDeleteView.as_view(
model=Owner,
slug_url_kwarg="name",
permission_required="organization.delete_owner",
),
name="owner_delete",
),
path(
"<str:dataspace>/<str:name>/",
OwnerDetailsView.as_view(),
name="owner_details",
),
path(
"add/", | DataspacedCreateView.as_view( | 0 | 2023-12-07 16:57:42+00:00 | 4k |
kylemcdonald/i2i-realtime | worker_app.py | [
{
"identifier": "Settings",
"path": "settings.py",
"snippet": "class Settings(BaseSettings):\n # config, cannot be changed\n mode: str = Field(default=\"video\")\n worker_id: int = Field(default=0)\n \n output_fast: bool = Field(default=True)\n zmq_video_port: int = Field(default=5554)\n job_start_port: int = Field(default=5555)\n settings_port: int = Field(default=5556)\n job_finish_port: int = Field(default=5557)\n output_port: int = Field(default=5558)\n osc_port: int = Field(default=8000)\n primary_hostname: str = Field(default='localhost')\n \n translation: bool = Field(default=False)\n safety: bool = Field(default=False)\n local_files_only: bool = Field(default=False)\n warmup: str = Field(default=None)\n threaded: bool = Field(default=False)\n \n # parameters for inference\n prompt: str = Field(default='A psychedelic landscape.')\n num_inference_steps: int = Field(default=2)\n fixed_seed: bool = Field(default=True)\n seed: int = Field(default=0)\n batch_size: int = Field(default=4)\n strength: float = Field(default=0.7)\n passthrough: bool = Field(default=False)\n compel: bool = Field(default=True)\n \n # can be changed dynamically\n opacity: float = Field(default=1.0)\n mirror: bool = Field(default=False)\n debug: bool = Field(default=False)\n pad: bool = Field(default=False)\n fps: int = Field(default=30)\n directory: str = Field(default='data/frames')\n \n class Config:\n env_file = \".env\"\n env_file_encoding = 'utf-8'"
},
{
"identifier": "ThreadedWorker",
"path": "threaded_worker.py",
"snippet": "class ThreadedWorker:\n def __init__(self, has_input=True, has_output=True, mode=\"thread\", debug=False):\n if mode == \"thread\":\n self.ParallelClass = threading.Thread\n self.QueueClass = queue.Queue\n elif mode == \"process\":\n self.ParallelClass = multiprocessing.Process\n self.QueueClass = multiprocessing.Queue\n if has_input:\n self.input_queue = self.QueueClass()\n if has_output:\n self.output_queue = self.QueueClass()\n self.should_exit = False\n self.parallel = self.ParallelClass(target=self.run)\n self.name = self.__class__.__name__\n \n self.debug = debug\n self.last_print = time.time()\n self.print_interval = 1\n self.durations = []\n\n def set_name(self, name):\n self.name = name\n return self\n\n def feed(self, feeder):\n print(self.name, \"feeding with\", feeder.name)\n self.input_queue = feeder.output_queue\n return self\n\n def start(self):\n if self.parallel.is_alive():\n return self\n print(self.name, \"starting\")\n self.parallel.start()\n return self\n\n # called after the parallel is started\n def setup(self):\n pass\n \n def clear_input(self):\n with self.input_queue.mutex:\n self.input_queue.queue.clear()\n\n # called before the parallel is joined\n def cleanup(self):\n pass\n\n def run(self):\n print(self.name, \"running\")\n self.setup()\n try:\n while not self.should_exit:\n \n cur_time = time.time()\n if hasattr(self, \"input_queue\"):\n try:\n input = self.input_queue.get(timeout=0.1)\n except queue.Empty:\n continue\n if input is None:\n break\n start_time = time.time()\n result = self.work(input)\n else:\n start_time = time.time()\n result = self.work()\n duration = time.time() - start_time\n \n if result is not None and hasattr(self, \"output_queue\"):\n self.output_queue.put(result)\n \n self.durations.append(duration)\n if len(self.durations) > 10:\n self.durations.pop(0)\n \n time_since_print = cur_time - self.last_print\n if self.debug and time_since_print > self.print_interval:\n duration = sum(self.durations) / len(self.durations)\n print(self.name, f\"{duration*1000:.2f}ms\", flush=True)\n self.last_print = cur_time\n \n except KeyboardInterrupt:\n print(self.name, \"interrupted\")\n self.cleanup()\n\n def close(self):\n print(self.name, \"closing\")\n self.should_exit = True\n if hasattr(self, \"input_queue\"):\n self.input_queue.put(None)\n if self.parallel.is_alive():\n self.parallel.join()"
},
{
"identifier": "DiffusionProcessor",
"path": "diffusion_processor.py",
"snippet": "class DiffusionProcessor:\n def __init__(self, warmup=None, local_files_only=True):\n base_model = \"stabilityai/sdxl-turbo\"\n vae_model = \"madebyollin/taesdxl\"\n\n warnings.filterwarnings(\"ignore\", category=torch.jit.TracerWarning)\n\n disable_progress_bar()\n self.pipe = AutoPipelineForImage2Image.from_pretrained(\n base_model,\n torch_dtype=torch.float16,\n variant=\"fp16\",\n local_files_only=local_files_only,\n )\n\n self.pipe.vae = AutoencoderTiny.from_pretrained(\n vae_model, torch_dtype=torch.float16, local_files_only=local_files_only\n )\n fix_seed(self.pipe)\n\n print(\"Model loaded\")\n\n config = CompilationConfig.Default()\n config.enable_xformers = True\n config.enable_triton = True\n config.enable_cuda_graph = True\n self.pipe = compile(self.pipe, config=config)\n\n print(\"Model compiled\")\n\n self.pipe.to(device=\"cuda\", dtype=torch.float16)\n self.pipe.set_progress_bar_config(disable=True)\n\n print(\"Model moved to GPU\", flush=True)\n \n self.compel = Compel(\n tokenizer=[self.pipe.tokenizer, self.pipe.tokenizer_2],\n text_encoder=[self.pipe.text_encoder, self.pipe.text_encoder_2],\n returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,\n requires_pooled=[False, True],\n )\n self.prompt_cache = FixedSizeDict(32)\n print(\"Prepared compel\")\n\n self.generator = torch.manual_seed(0)\n \n if warmup:\n warmup_shape = [int(e) for e in warmup.split(\"x\")]\n images = np.zeros(warmup_shape, dtype=np.float32)\n for i in range(2):\n print(f\"Warmup {warmup} {i+1}/2\")\n start_time = time.time()\n self.run(\n images,\n prompt=\"warmup\",\n num_inference_steps=2,\n strength=1.0\n )\n print(\"Warmup finished\", flush=True)\n \n def embed_prompt(self, prompt):\n if prompt not in self.prompt_cache:\n with torch.no_grad():\n print(\"embedding prompt\", prompt)\n self.prompt_cache[prompt] = self.compel(prompt)\n return self.prompt_cache[prompt]\n \n def meta_embed_prompt(self, prompt):\n pattern = r'\\(\"(.*?)\"\\s*,\\s*\"(.*?)\"\\)\\.blend\\((.*?),(.*?)\\)'\n match = re.search(pattern, prompt)\n if not match:\n return self.embed_prompt(prompt)\n str1, str2, t1, t2 = match.groups()\n t1 = float(t1)\n t2 = float(t2)\n cond1, pool1 = self.embed_prompt(str1)\n cond2, pool2 = self.embed_prompt(str2)\n cond = cond1 * t1 + cond2 * t2\n pool = pool1 * t1 + pool2 * t2\n return cond, pool\n \n def run(self, images, prompt, num_inference_steps, strength, use_compel=False, seed=None):\n strength = min(max(1 / num_inference_steps, strength), 1)\n if seed is not None:\n self.generator = torch.manual_seed(seed)\n kwargs = {}\n if use_compel:\n conditioning, pooled = self.meta_embed_prompt(prompt)\n batch_size = len(images)\n conditioning_batch = conditioning.expand(batch_size, -1, -1)\n pooled_batch = pooled.expand(batch_size, -1)\n kwargs[\"prompt_embeds\"] = conditioning_batch\n kwargs[\"pooled_prompt_embeds\"] = pooled_batch\n else:\n kwargs[\"prompt\"] = [prompt] * len(images)\n return self.pipe(\n image=images,\n generator=self.generator,\n num_inference_steps=num_inference_steps,\n guidance_scale=0,\n strength=strength,\n output_type=\"np\",\n **kwargs\n ).images"
}
] | from settings import Settings
from turbojpeg import TurboJPEG, TJPF_RGB
from threaded_worker import ThreadedWorker
from diffusion_processor import DiffusionProcessor
import zmq
import msgpack
import numpy as np
import time | 2,521 |
settings = Settings()
print(f"Starting worker #{settings.worker_id}")
class WorkerReceiver(ThreadedWorker):
def __init__(self, hostname, port):
super().__init__(has_input=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PULL)
self.sock.setsockopt(zmq.RCVTIMEO, 100)
self.sock.setsockopt(zmq.RCVHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
address = f"tcp://{hostname}:{port}"
print(f"WorkerReceiver connecting to {address}")
self.sock.connect(address)
self.jpeg = TurboJPEG()
def work(self):
while not self.should_exit:
try:
msg = self.sock.recv(flags=zmq.NOBLOCK, copy=False).bytes
receive_time = time.time()
# print(int(time.time()*1000)%1000, "receiving")
except zmq.Again:
continue
try:
unpacked = msgpack.unpackb(msg)
parameters = unpacked["parameters"]
images = []
for frame in unpacked["frames"]:
img = self.jpeg.decode(frame, pixel_format=TJPF_RGB)
images.append(img / 255)
unpacked["frames"] = images
return unpacked
except OSError:
continue
def cleanup(self):
self.sock.close()
self.context.term()
class Processor(ThreadedWorker):
def __init__(self, settings):
super().__init__()
self.generator = None
self.batch_count = 0
warmup = None
if settings.warmup:
warmup = f"{settings.batch_size}x{settings.warmup}"
|
settings = Settings()
print(f"Starting worker #{settings.worker_id}")
class WorkerReceiver(ThreadedWorker):
def __init__(self, hostname, port):
super().__init__(has_input=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PULL)
self.sock.setsockopt(zmq.RCVTIMEO, 100)
self.sock.setsockopt(zmq.RCVHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
address = f"tcp://{hostname}:{port}"
print(f"WorkerReceiver connecting to {address}")
self.sock.connect(address)
self.jpeg = TurboJPEG()
def work(self):
while not self.should_exit:
try:
msg = self.sock.recv(flags=zmq.NOBLOCK, copy=False).bytes
receive_time = time.time()
# print(int(time.time()*1000)%1000, "receiving")
except zmq.Again:
continue
try:
unpacked = msgpack.unpackb(msg)
parameters = unpacked["parameters"]
images = []
for frame in unpacked["frames"]:
img = self.jpeg.decode(frame, pixel_format=TJPF_RGB)
images.append(img / 255)
unpacked["frames"] = images
return unpacked
except OSError:
continue
def cleanup(self):
self.sock.close()
self.context.term()
class Processor(ThreadedWorker):
def __init__(self, settings):
super().__init__()
self.generator = None
self.batch_count = 0
warmup = None
if settings.warmup:
warmup = f"{settings.batch_size}x{settings.warmup}" | self.processor = DiffusionProcessor(warmup, settings.local_files_only) | 2 | 2023-12-05 12:32:28+00:00 | 4k |
sinantan/jsonpyd | src/jsonpyd/cli.py | [
{
"identifier": "JsonPyd",
"path": "src/jsonpyd/jsonpyd.py",
"snippet": "class JsonPyd:\n def __init__(self, schema, options={}):\n assert self.valid_json(schema), \"Schema should be String JSON format.\"\n\n self.schema = json.loads(schema)\n self.options: Options = Options(**options)\n self.model: BaseModel = self.create_base_model(self.schema)\n self.raw_code: str = self.get_raw_code()\n\n def __repr__(self):\n return f\"{self.__class__.__name__}: [{', '.join(self.schema.keys())}]\"\n\n @classmethod\n def valid_json(cls, v) -> bool:\n try:\n json.loads(v)\n return True\n except json.JSONDecodeError:\n return False\n\n def create_base_model(self, schema: Dict[str, Any]) -> BaseModel:\n \"\"\"\n Create a Pydantic BaseModel based on the given schema.\n\n Args:\n schema (Dict[str, Any]): A dictionary representing the schema structure.\n\n Returns:\n BaseModel: A Pydantic BaseModel generated based on the schema.\n \"\"\"\n fields = {}\n for key, value in schema.items():\n if isinstance(value, dict):\n fields[key] = (self.create_base_model(value), ...)\n else:\n value_type = type(value) if value is not None else Any\n fields[key] = (value_type, value)\n\n return create_model(_GENERATED_MODEL_NAME, **fields)(**schema)\n\n def generate_code(\n self,\n data: Dict[str, Any],\n class_name: str = _GENERATED_MODEL_NAME,\n ) -> str:\n \"\"\"\n Dynamically creates a Pydantic BaseModel class based on the given data.\n\n Args:\n data (Dict[str, Any]): A dictionary representing the fields and types for the model.\n class_name (str, optional): The name for the generated Pydantic BaseModel class. Defaults to \"GeneratedModel\".\n\n Returns:\n str: The generated Python code representing the Pydantic BaseModel class.\n \"\"\"\n code = f\"class {class_name}(BaseModel):{_NEWLINE}\"\n\n for field_name, field_value in data.items():\n field_type = (\n type(field_value).__name__ if field_value is not None else \"Any\"\n )\n if isinstance(field_value, dict):\n nested_code = self.generate_code(\n field_value, f\"{self.snake_to_pascal(field_name)}\"\n )\n variable = f\"{_TAB}{field_name}: {self.snake_to_pascal(field_name)}{_NEWLINE * 2}{nested_code}\"\n code += variable\n else:\n value = \"\"\n if self.options.force_optional:\n field_type = f\"Optional[{field_type}]\"\n value = \" = None\"\n variable = f\"{_TAB}{field_name}: {field_type}{value}{_NEWLINE}\"\n code += variable\n\n return code\n\n def get_raw_code(self) -> str:\n \"\"\"\n Generate raw code based on the model dump.\n\n Returns:\n str: Raw code containing class definitions.\n \"\"\"\n generated_code = self.generate_code(self.model.model_dump())\n generated_code = \"\".join(\n [\"class \" + i for i in generated_code.split(\"class \")[::-1] if i != \"\"]\n )\n return self.generate_imports() + _NEWLINE + generated_code\n\n def convert_to_py(self) -> None:\n \"\"\"\n Convert generated raw code to a Python file.\n \"\"\"\n return FileHandler.write_to_file(\n filename=self.options.file_name, content=self.raw_code\n )\n\n @staticmethod\n def generate_imports() -> str:\n \"\"\"\n Generate import statements in Python syntax based on the given import map.\n\n Returns:\n str: String containing import statements.\n \"\"\"\n import_map = {\"pydantic\": [\"BaseModel\"], \"typing\": [\"Any\", \"Optional\"]}\n imports = \"\"\n for package, items in import_map.items():\n imports += f\"from {package} import {', '.join(items)}{_NEWLINE}\"\n return imports\n\n @staticmethod\n def snake_to_pascal(word):\n \"\"\"\n Convert a snake_case string to PascalCase.\n\n Args:\n word (str): A string in snake_case format.\n\n Returns:\n str: The 
input string converted to PascalCase.\n \"\"\"\n return \"\".join(x.title() for x in word.split(\"_\"))"
},
{
"identifier": "FileHandler",
"path": "src/jsonpyd/util.py",
"snippet": "class FileHandler:\n \"\"\"\n A utility class for file handling operations.\n \"\"\"\n\n @staticmethod\n def _perform_file_operation(filename: str, mode: str, content: str = None):\n \"\"\"\n Perform a file operation (e.g., create, write) based on the specified mode.\n\n Args:\n filename (str): The name of the file to perform the operation on.\n mode (str): The mode for file operation ('w' for write, 'r' for read, etc.).\n content (str, optional): The content to be written to the file (for 'w' mode). Defaults to None.\n\n Raises:\n FileNotFoundError: If the specified file is not found.\n Exception: For any other unforeseen exceptions during file operations.\n \"\"\"\n mappings = {\"w\": \"write\", \"r\": \"read\"}\n try:\n with open(filename, mode) as file:\n return getattr(file, mappings.get(mode))(content)\n except FileNotFoundError as e:\n raise e\n except Exception as e:\n raise e\n\n @staticmethod\n def create_file(filename: str) -> None:\n \"\"\"\n Create a new file with the given filename.\n\n Args:\n filename (str): The name of the file to create.\n \"\"\"\n FileHandler._perform_file_operation(filename, \"w\")\n\n @staticmethod\n def read_file(path: str):\n \"\"\"\n Read a file with the given path.\n\n Args:\n path (str): The path of the file to read.\n \"\"\"\n return FileHandler._perform_file_operation(path, \"r\")\n\n @staticmethod\n def write_to_file(filename: str, content: str, file_type: str = \"py\") -> None:\n \"\"\"\n Write content to a file with the given filename and file type.\n\n Args:\n filename (str): The name of the file to write to.\n content (str): The content to be written to the file.\n file_type (str, optional): The type of the file to create. Defaults to \"py\".\n \"\"\"\n FileHandler._perform_file_operation(f\"{filename}.{file_type}\", \"w\", content)"
}
] | from .jsonpyd import JsonPyd
from .util import FileHandler
from argparse import ArgumentParser
from datetime import date | 1,797 |
class CLI:
def __init__(self):
self.args = self.parse_arguments()
def parse_arguments(self):
parser = ArgumentParser(description="JsonPyd command line arguments")
parser.add_argument(
"schema_path", type=str, help="Path of the referenced schema file."
)
parser.add_argument(
"--apply_snake_case",
type=bool,
default=True,
help="Apply snake_case to variables.",
)
parser.add_argument(
"--force_optional",
type=bool,
default=False,
help="Make variables optional by default.",
)
parser.add_argument(
"--file_name",
type=str,
default=f'{date.today().strftime("%d-%m-%Y")}_schema',
help="Name of the output file.",
)
return parser.parse_args()
def run(self):
|
class CLI:
def __init__(self):
self.args = self.parse_arguments()
def parse_arguments(self):
parser = ArgumentParser(description="JsonPyd command line arguments")
parser.add_argument(
"schema_path", type=str, help="Path of the referenced schema file."
)
parser.add_argument(
"--apply_snake_case",
type=bool,
default=True,
help="Apply snake_case to variables.",
)
parser.add_argument(
"--force_optional",
type=bool,
default=False,
help="Make variables optional by default.",
)
parser.add_argument(
"--file_name",
type=str,
default=f'{date.today().strftime("%d-%m-%Y")}_schema',
help="Name of the output file.",
)
return parser.parse_args()
def run(self): | schema = FileHandler.read_file(path=self.args.schema_path) | 1 | 2023-12-12 18:11:16+00:00 | 4k |
wusize/CLIM | ovdet/ovdet/models/vlms/clip/clip.py | [
{
"identifier": "build_model",
"path": "ovdet/ovdet/models/vlms/clip/openai_model.py",
"snippet": "def build_model(state_dict, state_file, use_image_encoder, use_text_encoder=True, **kwargs):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len([k for k in state_dict.keys() if k.startswith(\"visual.\") and k.endswith(\".attn.in_proj_weight\")])\n vision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n grid_size = round((state_dict[\"visual.positional_embedding\"].shape[0] - 1) ** 0.5)\n image_resolution = vision_patch_size * grid_size\n else:\n counts: list = [len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"visual.layer{b}\"))) for b in [1, 2, 3, 4]]\n vision_layers = tuple(counts)\n vision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n output_width = round((state_dict[\"visual.attnpool.positional_embedding\"].shape[0] - 1) ** 0.5)\n vision_patch_size = None\n assert output_width ** 2 + 1 == state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n image_resolution = output_width * 32\n\n embed_dim = state_dict[\"text_projection\"].shape[1]\n context_length = state_dict[\"positional_embedding\"].shape[0]\n vocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = state_dict[\"ln_final.weight\"].shape[0]\n transformer_heads = transformer_width // 64\n transformer_layers = len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"transformer.resblocks\")))\n\n model = CLIP(\n embed_dim=embed_dim,\n state_file=state_file,\n # vision\n use_image_encoder=use_image_encoder,\n image_resolution=image_resolution,\n vision_layers=vision_layers,\n vision_width=vision_width,\n vision_patch_size=vision_patch_size,\n # text\n use_text_encoder=use_text_encoder,\n context_length=context_length,\n vocab_size=vocab_size,\n transformer_width=transformer_width,\n transformer_heads=transformer_heads,\n transformer_layers=transformer_layers)\n\n for key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n if key in state_dict:\n del state_dict[key]\n\n convert_weights(model)\n # model.load_state_dict(state_dict)\n return model.eval()"
},
{
"identifier": "SimpleTokenizer",
"path": "ovdet/ovdet/models/vlms/clip/simple_tokenizer.py",
"snippet": "class SimpleTokenizer(object):\n def __init__(self, bpe_path: str = default_bpe()):\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split('\\n')\n merges = merges[1:49152-256-2+1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v+'</w>' for v in vocab]\n for merge in merges:\n vocab.append(''.join(merge))\n vocab.extend(['<|startoftext|>', '<|endoftext|>'])\n self.encoder = dict(zip(vocab, range(len(vocab))))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}\n self.pat = re.compile(r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\", re.IGNORECASE)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + ( token[-1] + '</w>',)\n pairs = get_pairs(word)\n\n if not pairs:\n return token+'</w>'\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))\n bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def decode(self, tokens):\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=\"replace\").replace('</w>', ' ')\n return text"
}
] | import hashlib
import os
import urllib
import warnings
import torch
from typing import Any, Union, List
from pkg_resources import packaging
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .openai_model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from torchvision.transforms import InterpolationMode | 1,659 |
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize", "tokenize_dynamic", "get_only_word_tokens"]
|
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize", "tokenize_dynamic", "get_only_word_tokens"] | _tokenizer = _Tokenizer() | 0 | 2023-12-09 05:43:08+00:00 | 4k |
eezkni/ColNeRF | src/model/models.py | [
{
"identifier": "ImageEncoder",
"path": "src/model/encoder.py",
"snippet": "class ImageEncoder(nn.Module):\n \"\"\"\n Global image encoder\n \"\"\"\n\n def __init__(self, backbone=\"resnet34\", pretrained=True, latent_size=128):\n \"\"\"\n :param backbone Backbone network. Assumes it is resnet*\n e.g. resnet34 | resnet50\n :param num_layers number of resnet layers to use, 1-5\n :param pretrained Whether to use model pretrained on ImageNet\n \"\"\"\n super().__init__()\n self.model = getattr(torchvision.models, backbone)(pretrained=pretrained)\n self.model.fc = nn.Sequential()\n self.register_buffer(\"latent\", torch.empty(1, 1), persistent=False)\n # self.latent (B, L)\n self.latent_size = latent_size\n if latent_size != 512:\n self.fc = nn.Linear(512, latent_size)\n\n def index(self, uv, cam_z=None, image_size=(), z_bounds=()):\n \"\"\"\n Params ignored (compatibility)\n :param uv (B, N, 2) only used for shape\n :return latent vector (B, L, N)\n \"\"\"\n return self.latent.unsqueeze(-1).expand(-1, -1, uv.shape[1])\n\n def forward(self, x):\n \"\"\"\n For extracting ResNet's features.\n :param x image (B, C, H, W)\n :return latent (B, latent_size)\n \"\"\"\n x = x.to(device=self.latent.device)\n x = self.model.conv1(x)\n x = self.model.bn1(x)\n x = self.model.relu(x)\n\n x = self.model.maxpool(x)\n x = self.model.layer1(x)\n x = self.model.layer2(x)\n x = self.model.layer3(x)\n x = self.model.layer4(x)\n\n x = self.model.avgpool(x)\n x = torch.flatten(x, 1)\n\n if self.latent_size != 512:\n x = self.fc(x)\n\n self.latent = x # (B, latent_size)\n return self.latent\n\n @classmethod\n def from_conf(cls, conf):\n return cls(\n conf.get_string(\"backbone\"),\n pretrained=conf.get_bool(\"pretrained\", True),\n latent_size=conf.get_int(\"latent_size\", 128),\n )"
},
{
"identifier": "PositionalEncoding",
"path": "src/model/code.py",
"snippet": "class PositionalEncoding(torch.nn.Module):\n \"\"\"\n Implement NeRF's positional encoding\n \"\"\"\n\n def __init__(self, num_freqs=6, d_in=3, freq_factor=np.pi, include_input=True):\n super().__init__()\n self.num_freqs = num_freqs\n self.d_in = d_in\n self.freqs = freq_factor * 2.0 ** torch.arange(0, num_freqs)\n self.d_out = self.num_freqs * 2 * d_in\n self.include_input = include_input\n if include_input:\n self.d_out += d_in\n # f1 f1 f2 f2 ... to multiply x by\n self.register_buffer(\n \"_freqs\", torch.repeat_interleave(self.freqs, 2).view(1, -1, 1)\n )\n # 0 pi/2 0 pi/2 ... so that\n # (sin(x + _phases[0]), sin(x + _phases[1]) ...) = (sin(x), cos(x)...)\n _phases = torch.zeros(2 * self.num_freqs)\n _phases[1::2] = np.pi * 0.5\n self.register_buffer(\"_phases\", _phases.view(1, -1, 1))\n\n def forward(self, x):\n \"\"\"\n Apply positional encoding (new implementation)\n :param x (batch, self.d_in)\n :return (batch, self.d_out)\n \"\"\"\n with profiler.record_function(\"positional_enc\"):\n embed = x.unsqueeze(1).repeat(1, self.num_freqs * 2, 1)\n embed = torch.sin(torch.addcmul(self._phases, embed, self._freqs))\n embed = embed.view(x.shape[0], -1)\n if self.include_input:\n embed = torch.cat((x, embed), dim=-1)\n return embed\n\n @classmethod\n def from_conf(cls, conf, d_in=3):\n # PyHocon construction\n return cls(\n conf.get_int(\"num_freqs\", 6),\n d_in,\n conf.get_float(\"freq_factor\", np.pi),\n conf.get_bool(\"include_input\", True),\n )"
},
{
"identifier": "make_encoder",
"path": "src/model/model_util.py",
"snippet": "def make_encoder(conf, **kwargs):\n enc_type = conf.get_string(\"type\", \"spatial\") # spatial | global\n if enc_type == \"spatial\":\n net = SpatialEncoder.from_conf(conf, **kwargs)\n elif enc_type == \"global\":\n net = ImageEncoder.from_conf(conf, **kwargs)\n else:\n raise NotImplementedError(\"Unsupported encoder type\")\n return net"
},
{
"identifier": "make_mlp",
"path": "src/model/model_util.py",
"snippet": "def make_mlp(conf, d_in, d_latent=0, allow_empty=False, **kwargs):\n mlp_type = conf.get_string(\"type\", \"mlp\") # mlp | resnet\n if mlp_type == \"mlp\":\n net = ImplicitNet.from_conf(conf, d_in + d_latent, **kwargs)\n elif mlp_type == \"resnet\":\n net = ResnetFC.from_conf(conf, d_in, d_latent=d_latent, **kwargs)\n elif mlp_type == \"empty\" and allow_empty:\n net = None\n else:\n raise NotImplementedError(\"Unsupported MLP type\")\n return net"
},
{
"identifier": "InterviewFusion",
"path": "src/model/InterviewAttention.py",
"snippet": "class InterviewFusion(torch.nn.Module):\n def __init__(self, base_ch=512, nviews=1, num_heads=4, block_size=8, halo_size=3, sr = 1, checkpoint=False, **kwargs,):\n super(InterviewFusion, self).__init__()\n self.trans = TransformerBlock(base_ch, block_size=block_size, halo_size=halo_size,\n num_heads=num_heads, sr = sr, checkpoint=checkpoint)\n self.nviews = nviews\n \n def forward(self, latents):\n initial_latent = latents\n H = latents.shape[-2]\n W = latents.shape[-1]\n C = latents.shape[-3]\n x = latents.reshape(-1, self.nviews, C, H, W)# B , Nviews, ...\n \n if self.nviews > 1:\n x = torch.unbind(x, dim=1)\n inter_value = []\n for i in range(self.nviews):\n src = x[i]\n ref = x[:i] + x[i+1:]\n aux = torch.zeros_like(src)\n for j in range(self.nviews-1):\n aux = aux + ref[j]\n inter_value.append(self.trans([src, aux]))\n \n return inter_value"
}
] | import torch
import torch.autograd.profiler as profiler
import os
import os.path as osp
import warnings
from .encoder import ImageEncoder
from .code import PositionalEncoding
from .model_util import make_encoder, make_mlp
from .InterviewAttention import InterviewFusion
from util import repeat_interleave
from cmath import isnan
from shutil import copyfile
from shutil import copyfile | 1,835 | """
Main model implementation
"""
class ColNeRFNet(torch.nn.Module):
def __init__(self, conf, nviews, data_format=None, stop_encoder_grad=False):
"""
:param conf PyHocon config subtree 'model'
"""
super().__init__()
| """
Main model implementation
"""
class ColNeRFNet(torch.nn.Module):
def __init__(self, conf, nviews, data_format=None, stop_encoder_grad=False):
"""
:param conf PyHocon config subtree 'model'
"""
super().__init__() | self.encoder = make_encoder(conf["encoder"]) | 2 | 2023-12-12 13:06:50+00:00 | 4k |
ku-dmlab/PORelDICE | train_offline.py | [
{
"identifier": "D4RLDataset",
"path": "dataset_utils.py",
"snippet": "class D4RLDataset(Dataset):\n def __init__(\n self,\n env: gym.Env,\n add_env: gym.Env = \"None\",\n expert_ratio: float = 1.0,\n clip_to_eps: bool = True,\n heavy_tail: bool = False,\n heavy_tail_higher: float = 0.0,\n eps: float = 1e-5,\n ):\n dataset = d4rl.qlearning_dataset(env)\n if add_env != \"None\":\n add_data = d4rl.qlearning_dataset(add_env)\n if expert_ratio >= 1:\n raise ValueError(\"in the mix setting, the expert_ratio must < 1\")\n length_add_data = int(add_data[\"rewards\"].shape[0] * (1 - expert_ratio))\n length_expert_data = int(length_add_data * expert_ratio)\n for k, _ in dataset.items():\n dataset[k] = np.concatenate(\n [\n add_data[k][:-length_expert_data],\n dataset[k][:length_expert_data],\n ],\n axis=0,\n )\n print(\"-------------------------------\")\n print(\n f\"we are in the mix data regimes, len(expert):{length_expert_data} | len(add_data): {length_add_data} | expert ratio: {expert_ratio}\"\n )\n print(\"-------------------------------\")\n\n if heavy_tail:\n dataset = d4rl.qlearning_dataset(\n env, heavy_tail=True, heavy_tail_higher=heavy_tail_higher\n )\n if clip_to_eps:\n lim = 1 - eps\n dataset[\"actions\"] = np.clip(dataset[\"actions\"], -lim, lim)\n\n dones_float = np.zeros_like(dataset[\"rewards\"])\n\n # To get initial observations\n initial_observations = [dataset['observations'][0].astype(np.float32)]\n \n for i in range(len(dones_float) - 1):\n if (\n np.linalg.norm(\n dataset[\"observations\"][i + 1] - dataset[\"next_observations\"][i]\n )\n > 1e-6\n or dataset[\"terminals\"][i] == 1.0\n ):\n dones_float[i] = 1\n\n # add initial observation at next step of done signal\n if i < len(dones_float) - 1:\n initial_observations.append(dataset['observations'][i+1].astype(np.float32))\n else:\n dones_float[i] = 0\n\n dones_float[-1] = 1\n\n super().__init__(\n dataset[\"observations\"].astype(np.float32),\n initial_observations = np.array(initial_observations, dtype=np.float32),\n actions=dataset[\"actions\"].astype(np.float32),\n rewards=dataset[\"rewards\"].astype(np.float32),\n masks=1.0 - dataset[\"terminals\"].astype(np.float32),\n dones_float=dones_float.astype(np.float32),\n next_observations=dataset[\"next_observations\"].astype(np.float32),\n size=len(dataset[\"observations\"]),\n )"
},
{
"identifier": "Log",
"path": "dataset_utils.py",
"snippet": "class Log:\n def __init__(\n self,\n root_log_dir,\n cfg_dict,\n txt_filename=\"log.txt\",\n csv_filename=\"progress.csv\",\n cfg_filename=\"config.json\",\n flush=True,\n ):\n self.dir = Path(root_log_dir) / _gen_dir_name()\n self.dir.mkdir(parents=True)\n self.txt_file = open(self.dir / txt_filename, \"w\")\n self.csv_file = None\n (self.dir / cfg_filename).write_text(json.dumps(cfg_dict))\n self.txt_filename = txt_filename\n self.csv_filename = csv_filename\n self.cfg_filename = cfg_filename\n self.flush = flush\n\n def write(self, message, end=\"\\n\"):\n now_str = datetime.now().strftime(\"%H:%M:%S\")\n message = f\"[{now_str}] \" + message\n for f in [sys.stdout, self.txt_file]:\n print(message, end=end, file=f, flush=self.flush)\n\n def __call__(self, *args, **kwargs):\n self.write(*args, **kwargs)\n\n def row(self, dict):\n if self.csv_file is None:\n self.csv_file = open(self.dir / self.csv_filename, \"w\", newline=\"\")\n self.csv_writer = csv.DictWriter(self.csv_file, list(dict.keys()))\n self.csv_writer.writeheader()\n\n self(str(dict))\n self.csv_writer.writerow(dict)\n if self.flush:\n self.csv_file.flush()\n\n def close(self):\n self.txt_file.close()\n if self.csv_file is not None:\n self.csv_file.close()"
},
{
"identifier": "split_into_trajectories",
"path": "dataset_utils.py",
"snippet": "def split_into_trajectories(\n observations, actions, rewards, masks, dones_float, next_observations\n):\n trajs = [[]]\n\n for i in tqdm(range(len(observations))):\n trajs[-1].append(\n (\n observations[i],\n actions[i],\n rewards[i],\n masks[i],\n dones_float[i],\n next_observations[i],\n )\n )\n if dones_float[i] == 1.0 and i + 1 < len(observations):\n trajs.append([])\n\n return trajs"
},
{
"identifier": "evaluate",
"path": "evaluation.py",
"snippet": "def evaluate(\n env_name: str, agent: nn.Module, env: gym.Env, num_episodes: int\n) -> Dict[str, float]:\n # stats = {'return': [], 'length': []}\n total_reward_ = []\n for _ in range(num_episodes):\n observation, done = env.reset(), False\n total_reward = 0.0\n while not done:\n action = agent.sample_actions(observation, temperature=0.0)\n observation, reward, done, info = env.step(action)\n total_reward += reward\n total_reward_.append(total_reward)\n\n average_return = np.array(total_reward_).mean()\n normalized_return = d4rl.get_normalized_score(env_name, average_return) * 100\n return normalized_return"
},
{
"identifier": "Learner",
"path": "learner.py",
"snippet": "class Learner(object):\n def __init__(\n self,\n seed: int,\n observations: jnp.ndarray,\n actions: jnp.ndarray,\n actor_lr: float = 3e-4,\n value_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n alpha: float = 0.1,\n epsilon: float = 0.0 ,\n dropout_rate: Optional[float] = None,\n value_dropout_rate: Optional[float] = None,\n layernorm: bool = False,\n max_steps: Optional[int] = None,\n max_clip: Optional[int] = None,\n mix_dataset: Optional[str] = None,\n alg: Optional[str] = None,\n opt_decay_schedule: str = \"cosine\",\n ):\n self.tau = tau\n self.discount = discount\n self.alpha = alpha\n self.epsilon = epsilon\n\n self.max_clip = max_clip\n self.alg = alg\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, value_key = jax.random.split(rng, 4)\n\n \n action_dim = actions.shape[-1]\n actor_def = policy.NormalTanhPolicy(\n hidden_dims,\n action_dim,\n log_std_scale=1e-3,\n log_std_min=-5.0,\n dropout_rate=dropout_rate,\n state_dependent_std=False,\n tanh_squash_distribution=False,\n )\n\n if opt_decay_schedule == \"cosine\":\n schedule_fn = optax.cosine_decay_schedule(-actor_lr, max_steps)\n optimiser = optax.chain(\n optax.scale_by_adam(), optax.scale_by_schedule(schedule_fn)\n )\n else:\n optimiser = optax.adam(learning_rate=actor_lr)\n actor = Model.create(actor_def, inputs=[actor_key, observations], tx=optimiser)\n\n value_def = value_net.ValueCritic(\n hidden_dims, layer_norm=layernorm, dropout_rate=value_dropout_rate\n )\n value = Model.create(\n value_def,\n inputs=[value_key, observations],\n tx=optax.adam(learning_rate=value_lr),\n )\n\n critic_def = value_net.DoubleCritic(hidden_dims)\n critic = Model.create(\n critic_def,\n inputs=[critic_key, observations, actions],\n tx=optax.adam(learning_rate=critic_lr),\n )\n target_critic = Model.create(\n critic_def, inputs=[critic_key, observations, actions]\n )\n\n self.actor = actor\n self.value = value\n self.rng = rng\n\n self.critic = critic\n self.target_critic = target_critic\n \n\n def sample_actions(\n self, observations: np.ndarray, temperature: float = 1.0\n ) -> jnp.ndarray:\n rng, actions = policy.sample_actions(\n self.rng, self.actor.apply_fn, self.actor.params, observations, temperature\n )\n self.rng = rng\n\n actions = np.asarray(actions)\n return np.clip(actions, -1, 1)\n\n def update(self, batch: Batch) -> InfoDict:\n # type <class 'str'> is not a valid JAX type.\n if self.alg == \"PORelDICE\":\n (\n new_rng,\n new_actor,\n new_critic,\n new_value,\n new_target_critic,\n info,\n ) = _update_jit_PORelDICE(\n self.rng,\n self.actor,\n self.critic,\n self.value,\n self.target_critic,\n batch,\n self.discount,\n self.tau,\n self.alpha,\n self.epsilon\n )\n else:\n raise NotImplementedError(f\"{self.alg} is not implemented.\")\n\n self.actor = new_actor\n self.value = new_value\n self.rng = new_rng\n \n self.critic = new_critic\n self.target_critic = new_target_critic\n\n\n return info"
}
] | from pathlib import Path
from typing import Tuple
from absl import app, flags
from ml_collections import config_flags
from dataset_utils import D4RLDataset, Log, split_into_trajectories
from evaluation import evaluate
from learner import Learner
import gym
import numpy as np
import tqdm
import wandb
import wrappers | 3,131 |
FLAGS = flags.FLAGS
flags.DEFINE_string("env_name", "halfcheetah-expert-v2", "Environment name.")
flags.DEFINE_string("save_dir", "./results/", "Tensorboard logging dir.")
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("eval_episodes", 5, "Number of episodes used for evaluation.")
flags.DEFINE_integer("log_interval", 1000, "Logging interval.")
flags.DEFINE_integer("eval_interval", 10000, "Eval interval.")
flags.DEFINE_integer("batch_size", 256, "Mini batch size.")
flags.DEFINE_integer("max_steps", int(1e6), "Number of training steps.")
flags.DEFINE_string("mix_dataset", "None", "mix the dataset")
flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.")
flags.DEFINE_string("alg", "PORelDICE", "the training algorithm")
flags.DEFINE_float("alpha", 1.0, "temperature")
flags.DEFINE_float("epsilon", -1.0, "epsilon")
config_flags.DEFINE_config_file(
"config",
"default.py",
"File path to the training hyperparameter configuration.",
lock_config=False,
)
def normalize(dataset):
trajs = split_into_trajectories(
dataset.observations,
dataset.actions,
dataset.rewards,
dataset.masks,
dataset.dones_float,
dataset.next_observations,
)
def compute_returns(traj):
episode_return = 0
for _, _, rew, _, _, _ in traj:
episode_return += rew
return episode_return
trajs.sort(key=compute_returns)
dataset.rewards /= compute_returns(trajs[-1]) - compute_returns(trajs[0])
dataset.rewards *= 1000.0
def make_env_and_dataset(env_name: str, seed: int) -> Tuple[gym.Env, D4RLDataset]:
env = gym.make(env_name)
env = wrappers.EpisodeMonitor(env)
env = wrappers.SinglePrecision(env)
env.seed(seed=seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
dataset = D4RLDataset(env)
if "antmaze" in FLAGS.env_name:
dataset.rewards -= 1.0
elif (
"halfcheetah" in FLAGS.env_name
or "walker2d" in FLAGS.env_name
or "hopper" in FLAGS.env_name
):
# pass
normalize(dataset)
return env, dataset
def main(_):
env, dataset = make_env_and_dataset(FLAGS.env_name, FLAGS.seed)
kwargs = dict(FLAGS.config)
kwargs["alpha"] = FLAGS.alpha
kwargs["alg"] = FLAGS.alg
kwargs["epsilon"] = FLAGS.epsilon
|
FLAGS = flags.FLAGS
flags.DEFINE_string("env_name", "halfcheetah-expert-v2", "Environment name.")
flags.DEFINE_string("save_dir", "./results/", "Tensorboard logging dir.")
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("eval_episodes", 5, "Number of episodes used for evaluation.")
flags.DEFINE_integer("log_interval", 1000, "Logging interval.")
flags.DEFINE_integer("eval_interval", 10000, "Eval interval.")
flags.DEFINE_integer("batch_size", 256, "Mini batch size.")
flags.DEFINE_integer("max_steps", int(1e6), "Number of training steps.")
flags.DEFINE_string("mix_dataset", "None", "mix the dataset")
flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.")
flags.DEFINE_string("alg", "PORelDICE", "the training algorithm")
flags.DEFINE_float("alpha", 1.0, "temperature")
flags.DEFINE_float("epsilon", -1.0, "epsilon")
config_flags.DEFINE_config_file(
"config",
"default.py",
"File path to the training hyperparameter configuration.",
lock_config=False,
)
def normalize(dataset):
trajs = split_into_trajectories(
dataset.observations,
dataset.actions,
dataset.rewards,
dataset.masks,
dataset.dones_float,
dataset.next_observations,
)
def compute_returns(traj):
episode_return = 0
for _, _, rew, _, _, _ in traj:
episode_return += rew
return episode_return
trajs.sort(key=compute_returns)
dataset.rewards /= compute_returns(trajs[-1]) - compute_returns(trajs[0])
dataset.rewards *= 1000.0
def make_env_and_dataset(env_name: str, seed: int) -> Tuple[gym.Env, D4RLDataset]:
env = gym.make(env_name)
env = wrappers.EpisodeMonitor(env)
env = wrappers.SinglePrecision(env)
env.seed(seed=seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
dataset = D4RLDataset(env)
if "antmaze" in FLAGS.env_name:
dataset.rewards -= 1.0
elif (
"halfcheetah" in FLAGS.env_name
or "walker2d" in FLAGS.env_name
or "hopper" in FLAGS.env_name
):
# pass
normalize(dataset)
return env, dataset
def main(_):
env, dataset = make_env_and_dataset(FLAGS.env_name, FLAGS.seed)
kwargs = dict(FLAGS.config)
kwargs["alpha"] = FLAGS.alpha
kwargs["alg"] = FLAGS.alg
kwargs["epsilon"] = FLAGS.epsilon | agent = Learner( | 4 | 2023-12-11 07:47:22+00:00 | 4k |
Anashel-RPG/echoai | job_manager.py | [
{
"identifier": "download_image",
"path": "image_downloader.py",
"snippet": "def download_image(image_url, local_path, job_id, prompt, additional_metadata):\r\n logging.info(f\"Initiating download: URL {image_url}, Local Path {local_path}, Job ID {job_id}, Prompt {prompt[:30]}...\")\r\n\r\n try:\r\n response = requests.get(image_url, stream=True)\r\n response.raise_for_status()\r\n\r\n # Read image from response\r\n image_data = response.content\r\n image = Image.open(BytesIO(image_data))\r\n\r\n # Draw text on the image\r\n draw = ImageDraw.Draw(image)\r\n font = ImageFont.load_default(size=28) # Specifying font size\r\n text = prompt.split(',')[0] # Extract first part of the prompt\r\n\r\n # Positioning the text at top left (10, 10)\r\n # draw.text((20, 10), text, font=font)\r\n\r\n # Prepare metadata (EXIF) with additional fields\r\n exif_dict = {\r\n \"0th\": {},\r\n \"Exif\": {},\r\n \"1st\": {},\r\n \"thumbnail\": None,\r\n \"GPS\": {} # Optional, if you want to include GPS-related tags\r\n }\r\n exif_dict[\"0th\"][piexif.ImageIFD.Artist] = job_id\r\n exif_dict[\"0th\"][piexif.ImageIFD.ImageDescription] = prompt\r\n\r\n # Concatenate additional metadata into a single string\r\n user_comment = \"; \".join([f\"{key}: {value}\" for key, value in additional_metadata.items()])\r\n\r\n # Encode user comment with ASCII prefix\r\n encoded_comment = b\"ASCII\\x00\\x00\" + user_comment.encode(\"utf-8\")\r\n\r\n # Assign encoded user comment to EXIF\r\n exif_dict[\"Exif\"][piexif.ExifIFD.UserComment] = encoded_comment\r\n\r\n # Generate EXIF bytes\r\n exif_bytes = piexif.dump(exif_dict)\r\n\r\n # Save image with metadata and added text\r\n image.save(local_path, \"jpeg\", exif=exif_bytes)\r\n logging.info(f\"Image downloaded successfully and saved to {local_path}, with embedded text and metadata\")\r\n\r\n except requests.exceptions.HTTPError as e:\r\n logging.error(f\"HTTP error occurred while downloading the image: {e.response.status_code} - {e.response.text}\")\r\n except requests.exceptions.ConnectionError as e:\r\n logging.error(\"Connection error occurred while downloading the image.\")\r\n except requests.exceptions.Timeout as e:\r\n logging.error(\"Timeout error occurred while downloading the image.\")\r\n except requests.exceptions.RequestException as e:\r\n logging.error(f\"An error occurred while downloading the image: {e}\")\r\n except IOError as e:\r\n logging.error(f\"I/O error occurred while saving the image to {local_path}: {e}\")\r\n except Exception as e:\r\n logging.error(f\"An unexpected error occurred while downloading the image: {e}\")\r"
},
{
"identifier": "MAX_CONCURRENT_JOBS",
"path": "config.py",
"snippet": "MAX_CONCURRENT_JOBS = 1\r"
},
{
"identifier": "RATE_LIMIT_DELAY",
"path": "config.py",
"snippet": "RATE_LIMIT_DELAY = timedelta(seconds=2)\r"
},
{
"identifier": "API_BASE_URL",
"path": "config.py",
"snippet": "API_BASE_URL = 'https://cloud.leonardo.ai/api/rest/v1/'\r"
},
{
"identifier": "HEADERS",
"path": "config.py",
"snippet": "HEADERS = {\r\n \"accept\": \"application/json\",\r\n \"authorization\": AUTHORIZATION_TOKEN\r\n}"
},
{
"identifier": "API_CALL_DELAY",
"path": "config.py",
"snippet": "API_CALL_DELAY = 3\r"
},
{
"identifier": "get_job_data",
"path": "job_data_store.py",
"snippet": "def get_job_data(job_id):\r\n global job_data_store\r\n data = job_data_store.get(job_id)\r\n if data:\r\n logging.info(f\"Retrieved job data for ID {job_id}: {data}\")\r\n else:\r\n logging.warning(f\"No job data found for ID {job_id}\")\r\n return data\r"
},
{
"identifier": "store_job_data",
"path": "job_data_store.py",
"snippet": "def store_job_data(job_id, prompt):\r\n global job_data_store\r\n job_data_store[job_id] = {\r\n \"prompt\": prompt\r\n }\r\n # logging.info(f\"Job data stored: ID {job_id}, Prompt {prompt[:30]}...\")\r\n\r\n # Log the current state of the job_data_store\r\n # logging.info(f\"Current state of job_data_store: {job_data_store}\")\r"
}
] | import threading
import time
import os
import json
import requests
import logging
from queue import Queue, Empty
from datetime import datetime
from image_downloader import download_image
from config import MAX_CONCURRENT_JOBS, RATE_LIMIT_DELAY, API_BASE_URL, HEADERS, API_CALL_DELAY
from job_data_store import get_job_data, store_job_data
| 3,015 | download_image(image_url, local_path, job_id, job_data['prompt'], additional_metadata)
print(f"NOW SHOWING: {job_data}")
except Exception as e:
logging.error(f"Error downloading content for job ID {job_id}: {e}")
queue_processor = Queue()
class Job:
def __init__(self, data):
self.data = data
self.status = 'pending'
self.id = None
self.start_time = datetime.now()
self.last_checked = None
self.check_count = 0
self.previous_status = None
self.last_log_time = None
def start(self):
self.id = API.start_job(self.data)
self.start_time = datetime.now()
if self.id:
store_job_data(self.id, self.data['prompt'])
self.status = 'processing'
else:
# Retry once if the job fails to start
logging.info("== WARNING RETRY ==")
logging.info(self.data['prompt'])
time.sleep(5) # Use the configurable delay from config.py
self.id = API.start_job(self.data)
if self.id:
store_job_data(self.id, self.data['prompt']) # Store job data in the job_data_store on successful retry
self.status = 'processing'
else:
self.status = 'failed'
logging.info("== RETRY FAILED ==")
self.last_checked = datetime.now()
def should_log(self):
"""Determines if the current status should be logged."""
current_time = datetime.now()
if self.previous_status != self.status or (
self.last_log_time is None or (current_time - self.last_log_time).total_seconds() > 10):
self.last_log_time = current_time
return True
return False
def check_status(self):
if self.id is None: # Skip processing if job ID is None
logging.error(f"== SKIPPING ID NONE ==")
self.status = 'failed'
return
current_time = datetime.now()
# Initial delay of 10 seconds before the first check
if self.last_checked is None:
if (current_time - self.start_time).total_seconds() < 10:
if self.should_log():
logging.info(f"Initial delay in progress for job ID {self.id}.")
threading.Timer(1, lambda: queue_processor.put(self)).start()
return
self.last_checked = current_time
# Check job status at one-second intervals after the initial delay
if (current_time - self.last_checked).total_seconds() >= 1:
self.last_checked = current_time
self.previous_status = self.status
self.status = API.check_job_status(self.id)
if self.should_log():
logging.info(f"Checked status for job ID {self.id}: {self.status}")
if self.status == 'COMPLETE':
self.status = 'completed'
if self.should_log():
logging.info(f"Job ID {self.id} completed, downloading content.")
API.download_job_content(self.id)
elif (current_time - self.start_time).total_seconds() > 10000000:
self.status = 'failed'
if self.should_log():
logging.error(f"Job ID {self.id} failed due to timeout.")
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
class JobManager:
def __init__(self):
self.jobs = []
self.active_jobs = 0
self.lock = threading.Lock()
self.empty_queue_count = 0 # Counter for empty queue checks
def run_job(self, job_payloads):
with self.lock:
for payload in job_payloads:
if self.active_jobs < MAX_CONCURRENT_JOBS:
job = Job(payload)
self.jobs.append(job)
job.start()
self.active_jobs += 1
queue_processor.put(job)
logging.info(f"Job {job.id} started.")
else:
self.jobs.append(Job(payload))
logging.info("Maximum concurrent jobs reached, job added to queue.")
def process_queue(self):
while True:
all_jobs_done = len(self.jobs) == 0 and self.active_jobs == 0
if all_jobs_done:
logging.info("All jobs have been processed. Exiting.")
break
try:
| # job_manager.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class API:
total_api_credit_cost = 0 # Class-level variable to track the total cost
total_images = 0 # Class-level variable to track the total images
@staticmethod
def start_job(data):
url = API_BASE_URL + 'generations'
headers = HEADERS
payload = json.dumps(data)
try:
logging.info("Calling Leonardo GENERATE")
logging.info("======")
response = requests.post(url, headers=headers, data=payload)
response.raise_for_status() # Raises an HTTPError for certain status codes
job_response = response.json()
job_id = job_response.get('sdGenerationJob', {}).get('generationId')
api_credit_cost = job_response.get('sdGenerationJob', {}).get('apiCreditCost', 0) # Get the credit cost
if job_id:
logging.info(f"Job started with ID: {job_id}, Credit Cost: {api_credit_cost}")
API.total_api_credit_cost += api_credit_cost # Increment the total cost
API.total_images += 1 # Increment the total images
logging.info(f"== TOTAL COST: {API.total_api_credit_cost} API Credits ==")
logging.info(f"== TOTAL IMAGES: {API.total_images} ==")
store_job_data(job_id, data['prompt']) # Store the job ID and prompt
return job_id
else:
logging.error("Failed to start job: No 'generationId' found in response.")
return None
except requests.exceptions.HTTPError as e:
# HTTP error occurred
logging.error(f"HTTP error occurred while starting the job: {e.response.status_code} - {e.response.text}")
except Exception as e:
# Other errors (e.g., network issues, JSON decoding issue, etc.)
logging.error(f"Error starting job: {e}")
return None
@staticmethod
def check_job_status(job_id):
url = API_BASE_URL + f'generations/{job_id}'
headers = HEADERS
# time.sleep(3) # Wait for 1 second before making the API call
time.sleep(API_CALL_DELAY) # Use the configurable delay from config.py
try:
logging.info(f"Calling Leonardo STATUS for job ID {job_id}") # Include job ID in log
response = requests.get(url, headers=headers)
response.raise_for_status()
job_status_response = response.json()
status = job_status_response.get('generations_by_pk', {}).get('status', 'UNKNOWN')
return status
except Exception as e:
logging.error(f"Error checking job status for ID {job_id}: {e}")
return 'UNKNOWN'
@staticmethod
def download_job_content(job_id):
url = API_BASE_URL + f'generations/{job_id}'
headers = HEADERS
try:
logging.info(f"Calling Leonardo CDN DOWNLOAD")
response = requests.get(url, headers=headers)
response.raise_for_status()
job_content_response = response.json()
generated_images = job_content_response.get('generations_by_pk', {}).get('generated_images', [])
# Extract additional metadata
additional_metadata = {
"inferenceSteps": job_content_response.get('generations_by_pk', {}).get('inferenceSteps'),
"seed": job_content_response.get('generations_by_pk', {}).get('seed'),
"presetStyle": job_content_response.get('generations_by_pk', {}).get('presetStyle'),
"initStrength": job_content_response.get('generations_by_pk', {}).get('initStrength'),
"guidanceScale": job_content_response.get('generations_by_pk', {}).get('guidanceScale'),
"promptMagic": job_content_response.get('generations_by_pk', {}).get('promptMagic'),
"promptMagicVersion": job_content_response.get('generations_by_pk', {}).get('promptMagicVersion'),
"promptMagicStrength": job_content_response.get('generations_by_pk', {}).get('promptMagicStrength'),
"photoReal": job_content_response.get('generations_by_pk', {}).get('photoReal'),
"photoRealStrength": job_content_response.get('generations_by_pk', {}).get('photoRealStrength')
}
logging.info(f"Attempting to download content for job ID {job_id}")
job_data = get_job_data(job_id) # Retrieve job data
if job_data: # Check if job data is available
for image in generated_images:
image_url = image.get('url')
if image_url:
local_path = os.path.join("downloaded_images", f"{job_id}_{image.get('id', 'unknown')}.jpg")
logging.info(f"Downloading image: {image_url}")
download_image(image_url, local_path, job_id, job_data['prompt'], additional_metadata)
print(f"NOW SHOWING: {job_data}")
except Exception as e:
logging.error(f"Error downloading content for job ID {job_id}: {e}")
queue_processor = Queue()
class Job:
def __init__(self, data):
self.data = data
self.status = 'pending'
self.id = None
self.start_time = datetime.now()
self.last_checked = None
self.check_count = 0
self.previous_status = None
self.last_log_time = None
def start(self):
self.id = API.start_job(self.data)
self.start_time = datetime.now()
if self.id:
store_job_data(self.id, self.data['prompt'])
self.status = 'processing'
else:
# Retry once if the job fails to start
logging.info("== WARNING RETRY ==")
logging.info(self.data['prompt'])
time.sleep(5) # Use the configurable delay from config.py
self.id = API.start_job(self.data)
if self.id:
store_job_data(self.id, self.data['prompt']) # Store job data in the job_data_store on successful retry
self.status = 'processing'
else:
self.status = 'failed'
logging.info("== RETRY FAILED ==")
self.last_checked = datetime.now()
def should_log(self):
"""Determines if the current status should be logged."""
current_time = datetime.now()
if self.previous_status != self.status or (
self.last_log_time is None or (current_time - self.last_log_time).total_seconds() > 10):
self.last_log_time = current_time
return True
return False
def check_status(self):
if self.id is None: # Skip processing if job ID is None
logging.error(f"== SKIPPING ID NONE ==")
self.status = 'failed'
return
current_time = datetime.now()
# Initial delay of 10 seconds before the first check
if self.last_checked is None:
if (current_time - self.start_time).total_seconds() < 10:
if self.should_log():
logging.info(f"Initial delay in progress for job ID {self.id}.")
threading.Timer(1, lambda: queue_processor.put(self)).start()
return
self.last_checked = current_time
# Check job status at one-second intervals after the initial delay
if (current_time - self.last_checked).total_seconds() >= 1:
self.last_checked = current_time
self.previous_status = self.status
self.status = API.check_job_status(self.id)
if self.should_log():
logging.info(f"Checked status for job ID {self.id}: {self.status}")
if self.status == 'COMPLETE':
self.status = 'completed'
if self.should_log():
logging.info(f"Job ID {self.id} completed, downloading content.")
API.download_job_content(self.id)
elif (current_time - self.start_time).total_seconds() > 10000000:
self.status = 'failed'
if self.should_log():
logging.error(f"Job ID {self.id} failed due to timeout.")
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
class JobManager:
def __init__(self):
self.jobs = []
self.active_jobs = 0
self.lock = threading.Lock()
self.empty_queue_count = 0 # Counter for empty queue checks
def run_job(self, job_payloads):
with self.lock:
for payload in job_payloads:
if self.active_jobs < MAX_CONCURRENT_JOBS:
job = Job(payload)
self.jobs.append(job)
job.start()
self.active_jobs += 1
queue_processor.put(job)
logging.info(f"Job {job.id} started.")
else:
self.jobs.append(Job(payload))
logging.info("Maximum concurrent jobs reached, job added to queue.")
def process_queue(self):
while True:
all_jobs_done = len(self.jobs) == 0 and self.active_jobs == 0
if all_jobs_done:
logging.info("All jobs have been processed. Exiting.")
break
try:
| job = queue_processor.get(timeout=RATE_LIMIT_DELAY.total_seconds())
| 2 | 2023-12-09 16:16:39+00:00 | 4k |
digitalfortress-dev/python-sqs-client | examples/publish.py | [
{
"identifier": "SQSClient",
"path": "sqs_client/client.py",
"snippet": "class SQSClient:\n \"\"\"\n This class represents a client for interacting with the SQS service.\n\n It provides methods for sending and receiving messages.\n \"\"\"\n\n def __init__(\n self,\n region_name=None,\n aws_access_key_id=None,\n aws_secret_access_key=None,\n ):\n \"\"\"\n Initializes the SQSClient class.\n\n Args:\n region_name: (string) The name of the region associated with the client.\n aws_access_key_id: (string) The access key to use when creating\n the client. This is entirely optional, and if not provided,\n the credentials configured for the session will automatically\n be used. You only need to provide this argument if you want\n to override the credentials used for this specific client.\n aws_access_key_id: (string) The access key to use when creating\n the client. This is entirely optional, and if not provided,\n the credentials configured for the session will automatically\n be used. You only need to provide this argument if you want\n to override the credentials used for this specific client.\n \"\"\"\n self._boto3_client = boto3.client(\n \"sqs\",\n region_name=region_name,\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n )\n self._list_queue_urls = None\n self._task_list = {}\n\n def get_task_list(self):\n \"\"\"\n This function retrieves the list of tasks currently\n Returns:\n The list of tasks.\n \"\"\"\n return self._task_list\n\n def check_health(self):\n \"\"\"\n This function performs a health check on the entire task list, analyzing the health of all its registered tasks\n Returns:\n (bool) The health of all its registered tasks.\n \"\"\"\n return all([task.check_health() for task in self._task_list.values()])\n\n def task(\n self,\n queue_name,\n max_number_of_messages=1,\n visibility_timeout=30,\n wait_time_seconds=20,\n lazy=False,\n daemon=True,\n ):\n \"\"\"\n Decorator to create a task object out of any callable.\n\n Args:\n queue_name: (string) The name of the SQS queue you want to create a task.\n max_number_of_messages: (integer) The maximum number of messages to return.\n Valid values: 1 to 10. Default: 1.\n visibility_timeout: (integer) The duration (in seconds) that the received messages are hidden from\n subsequent retrieve requests after being retrieved by a ReceiveMessage request.\n Default: 30\n wait_time_seconds: (integer) The duration (in seconds) for which the call waits for a message\n to arrive in the queue before returning.\n Default: 20.\n lazy: (bool) Make this task lazy mode. 
Trigger SQS message by task_name.trigger(*args, **kwargs)\n daemon: (bool) Make this task daemon mode.\n The entire Python program exits when no alive non-daemon threads are left.\n\n Examples:\n @sqs_client.task(queue=\"dev-retailer_getting_order_sqs\")\n def test_task(message):\n print(message)\n\n Returns:\n The task object.\n \"\"\"\n\n def inner_create_task(callback):\n task = Task(\n sqs_client=self,\n queue_name=queue_name,\n callback=callback,\n max_number_of_messages=max_number_of_messages,\n visibility_timeout=visibility_timeout,\n wait_time_seconds=wait_time_seconds,\n lazy=lazy,\n daemon=daemon,\n )\n self._task_list[task.get_id()] = task\n return task\n\n return inner_create_task\n\n def get_queue_url_by_name(self, queue_name):\n \"\"\"\n This function retrieves the URL of an SQS queue based on its name.\n\n Args:\n queue_name: (string) The name of the SQS queue you want to find.\n \"\"\"\n if not self._list_queue_urls:\n response = self._boto3_client.list_queues()\n\n self._list_queue_urls = response[\"QueueUrls\"]\n\n for queue_url in self._list_queue_urls:\n if queue_url.endswith(f\"/{queue_name}\"):\n return queue_url\n\n return None\n\n def delete_message(self, queue_name, message):\n \"\"\"\n This function permanently removes a message from an SQS queue.\n\n Args:\n queue_name: (string) The name of the SQS queue you want to delete message.\n message: The SQS message you want to remove.\n \"\"\"\n receipt_handle = message[\"ReceiptHandle\"]\n self._boto3_client.delete_message(\n QueueUrl=self.get_queue_url_by_name(queue_name),\n ReceiptHandle=receipt_handle,\n )\n\n def subscribe(\n self,\n queue_name,\n callback,\n max_number_of_messages=1,\n visibility_timeout=30,\n wait_time_seconds=20,\n ):\n \"\"\"\n This function continuously receives messages from an SQS queue and processes them through a callback.\n\n Args:\n queue_name: (string) The name of the SQS queue you want to receives messages.\n callback: (function) The callback function you want to use to process the message.\n max_number_of_messages: (integer) The maximum number of messages to return.\n Valid values: 1 to 10. Default: 1.\n visibility_timeout: (integer) The duration (in seconds) that the received messages are hidden from\n subsequent retrieve requests after being retrieved by a ReceiveMessage request.\n Default: 30\n wait_time_seconds: (integer) The duration (in seconds) for which the call waits for a message\n to arrive in the queue before returning.\n Default: 20.\n \"\"\"\n while True:\n messages = self._boto3_client.receive_message(\n QueueUrl=self.get_queue_url_by_name(queue_name),\n MaxNumberOfMessages=max_number_of_messages,\n VisibilityTimeout=visibility_timeout,\n WaitTimeSeconds=wait_time_seconds,\n )\n\n if \"Messages\" in messages and messages[\"Messages\"]:\n for message in messages[\"Messages\"]:\n try:\n callback(message)\n except Exception as e:\n exception(e)\n\n self.delete_message(queue_name, message)\n\n def publish(\n self,\n queue_name,\n message,\n delay_seconds=0,\n ):\n \"\"\"\n This function allows you to publish a message to an SQS queue.\n\n Args:\n queue_name: (string) The name of the SQS queue you want to receive messages.\n message: (string) The message content to be sent.\n delay_seconds: (integer) The length of time, in seconds, for which to delay a specific message.\n Valid values: 0 to 900. Default: 0\n \"\"\"\n self._boto3_client.send_message(\n QueueUrl=self.get_queue_url_by_name(queue_name),\n DelaySeconds=delay_seconds,\n MessageBody=message,\n )"
},
{
"identifier": "Publisher",
"path": "sqs_client/publisher.py",
"snippet": "class Publisher:\n \"\"\"\n This class represents a publisher to send messages to an SQS queue\n \"\"\"\n\n def __init__(\n self,\n sqs_client,\n queue_name,\n delay_seconds=0,\n ):\n \"\"\"\n Initializes the Publisher class.\n\n Args:\n sqs_client: (SQSClient) The SQSClient of task.\n queue_name: (string) The name of the SQS queue you want to send and receive messages.\n delay_seconds: (integer) The length of time, in seconds, for which to delay a specific message.\n Valid values: 0 to 900. Default: 0\n \"\"\"\n self._sqs_client = sqs_client\n self._queue_name = queue_name\n self._delay_seconds = delay_seconds\n\n def publish(self, message):\n \"\"\"\n This function allows you to publish a message to an SQS queue.\n\n Args:\n message: (string) The message content to be sent.\n \"\"\"\n self._sqs_client.publish(\n queue_name=self._queue_name,\n delay_seconds=self._delay_seconds,\n message=message,\n )\n\n def publish_lazy(self, *args, **kwargs):\n \"\"\"\n This function allows you to publish a message in lazy mode.\n \"\"\"\n self._sqs_client.publish(\n queue_name=self._queue_name,\n delay_seconds=self._delay_seconds,\n message=json.dumps(\n {\n \"args\": args,\n \"kwargs\": kwargs,\n }\n ),\n )"
}
] | from sqs_client.client import SQSClient
from sqs_client.publisher import Publisher | 2,129 |
sqs_client = SQSClient()
sqs_client.publish(
queue_name="sqs-queue-name",
message="test message",
)
# or
|
sqs_client = SQSClient()
sqs_client.publish(
queue_name="sqs-queue-name",
message="test message",
)
# or
| publisher = Publisher( | 1 | 2023-12-06 07:35:29+00:00 | 4k |
LkPrtctrd/BSL-V53 | Heart/Packets/Client/Authentification/LoginMessage.py | [
{
"identifier": "Messaging",
"path": "Heart/Messaging.py",
"snippet": "class Messaging:\n def writeHeader(message, payloadLen):\n message.messageBuffer += message.getMessageType().to_bytes(2, 'big', signed=True)\n message.messageBuffer += payloadLen.to_bytes(3, 'big', signed=True)\n message.messageBuffer += message.messageVersion.to_bytes(2, 'big', signed=True)\n\n def readHeader(headerBytes):\n headerData = []\n headerData.append(int.from_bytes(headerBytes[:2], 'big', signed=True))\n headerData.append(int.from_bytes(headerBytes[2:5], 'big', signed=True))\n return headerData\n\n def sendMessage(messageType, fields, cryptoInit, player=None):\n from Heart.Logic.LogicLaserMessageFactory import LogicLaserMessageFactory\n message = LogicLaserMessageFactory.createMessageByType(messageType, b'')\n if player is not None:\n message.encode(fields, player)\n else:\n message.encode(fields)\n message.messagePayload = cryptoInit.encryptServer(message.getMessageType(), message.messagePayload)\n Messaging.writeHeader(message, len(message.messagePayload))\n message.messageBuffer += message.messagePayload\n try:\n fields[\"Socket\"].send(message.messageBuffer)\n except Exception:\n print(traceback.format_exc())"
},
{
"identifier": "DatabaseHandler",
"path": "DB/DatabaseHandler.py",
"snippet": "class DatabaseHandler():\n def __init__(self):\n self.conn = sqlite3.connect(\"DB/Files/player.sqlite\")\n self.cursor = self.conn.cursor()\n try:\n self.cursor.execute(\"\"\"CREATE TABLE main (ID int, Token text, Data json)\"\"\")\n except sqlite3.OperationalError:\n pass\n except Exception:\n print(traceback.format_exc())\n\n def createAccount(self, data):\n try:\n self.cursor.execute(\"INSERT INTO main (ID, Token, Data) VALUES (?, ?, ?)\", (data[\"ID\"][1], data[\"Token\"], json.dumps(data, ensure_ascii=0)))\n self.conn.commit()\n except Exception:\n print(traceback.format_exc())\n\n def getAll(self):\n self.playersId = []\n try:\n self.cursor.execute(\"SELECT * from main\")\n self.db = self.cursor.fetchall()\n for i in range(len(self.db)):\n self.playersId.append(self.db[i][0])\n return self.playersId\n except Exception:\n print(traceback.format_exc())\n\n def getPlayer(self, plrId):\n try:\n self.cursor.execute(\"SELECT * from main where ID=?\", (plrId[1],))\n return json.loads(self.cursor.fetchall()[0][2])\n except Exception:\n print(traceback.format_exc())\n\n def getPlayerEntry(self, plrId):\n try:\n self.cursor.execute(\"SELECT * from main where ID=?\", (plrId[1],))\n return self.cursor.fetchall()[0]\n except IndexError:\n pass\n except Exception:\n print(traceback.format_exc())\n\n def loadAccount(self, player, plrId):\n try:\n self.cursor.execute(\"SELECT * from main where ID=?\", (plrId[1],))\n playerData = json.loads(self.cursor.fetchall()[0][2])\n player.ID = playerData[\"ID\"]\n player.Name = playerData[\"Name\"]\n #player.AllianceID = playerData[\"AllianceID\"]\n player.Registered = playerData[\"Registered\"]\n player.Thumbnail = playerData[\"Thumbnail\"]\n player.Namecolor = playerData[\"Namecolor\"]\n player.Region = playerData[\"Region\"]\n player.ContentCreator = playerData[\"ContentCreator\"]\n player.Coins = playerData[\"Coins\"]\n player.Gems = playerData[\"Gems\"]\n player.Blings = playerData[\"Blings\"]\n player.Trophies = playerData[\"Trophies\"]\n player.HighestTrophies = playerData[\"HighestTrophies\"]\n player.TrophyRoadTier = playerData[\"TrophyRoadTier\"]\n player.Experience = playerData[\"Experience\"]\n player.Level = playerData[\"Level\"]\n player.Tokens = playerData[\"Tokens\"]\n player.TokensDoubler = playerData[\"TokensDoubler\"]\n player.SelectedBrawlers = playerData[\"SelectedBrawlers\"]\n player.OwnedPins = playerData[\"OwnedPins\"]\n player.OwnedThumbnails = playerData[\"OwnedThumbnails\"]\n player.OwnedBrawlers = playerData[\"OwnedBrawlers\"]\n player.OwnedSkins = playerData[\"OwnedSkins\"]\n except Exception:\n print(traceback.format_exc())\n\n def updatePlayerData(self, data, calling_instance):\n try:\n self.cursor.execute(\"UPDATE main SET Data=? WHERE ID=?\", (json.dumps(data, ensure_ascii=0), calling_instance.player.ID[1]))\n self.conn.commit()\n self.loadAccount(calling_instance.player, calling_instance.player.ID)\n except Exception:\n print(traceback.format_exc())\n\n def playerExist(self, loginToken, loginID):\n try:\n if loginID[1] in self.getAll():\n if loginToken != self.getPlayerEntry(loginID)[1]:\n return False\n return True\n return False\n except Exception:\n print(traceback.format_exc())"
},
{
"identifier": "PiranhaMessage",
"path": "Heart/Packets/PiranhaMessage.py",
"snippet": "class PiranhaMessage(ByteStream):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageBuffer = messageData\n self.fields = {}\n\n def decode(self, fields):\n if True:\n print()\n for typeName,value in fields.items():\n print(f\"{typeName}: {value}\")\n print()\n\n def getLength(self):\n return len(self.messageBuffer)\n\n def isServerToClient(self):\n messageType = self.getMessageType()\n if 20000 <= messageType < 30000 or messageType == 40000:\n return True\n elif 10000 <= messageType < 20000 or messageType == 30000:\n return False"
},
{
"identifier": "ClientsManager",
"path": "Heart/Utils/ClientsManager.py",
"snippet": "class ClientsManager:\n\n PlayersList = {}\n\n def AddPlayer(playerID, socket):\n if ClientsManager.PlayersList.keys().__contains__(playerID[1]):\n ClientsManager.RemovePlayer(playerID)\n ClientsManager.PlayersList[playerID[1]] = {\"Socket\": socket}\n\n def RemovePlayer(PlayerID):\n try:\n ClientsManager.PlayersList.pop(PlayerID[1])\n except KeyError:\n print(f\"Cannot remove socket with id: {PlayerID} Reason: {PlayerID} is not in the list.\")\n\n def GetAll():\n return ClientsManager.PlayersList\n\n def GetCount():\n return len(ClientsManager.PlayersList)"
}
] | from Heart.Messaging import Messaging
from DB.DatabaseHandler import DatabaseHandler
from Heart.Packets.PiranhaMessage import PiranhaMessage
from Heart.Utils.ClientsManager import ClientsManager
import json | 1,994 |
class LoginMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields):
pass
def decode(self):
fields = {}
fields["AccountID"] = self.readLong()
fields["PassToken"] = self.readString()
fields["ClientMajor"] = self.readInt()
fields["ClientMinor"] = self.readInt()
fields["ClientBuild"] = self.readInt()
fields["ResourceSha"] = self.readString()
fields["Device"] = self.readString()
fields["PreferredLanguage"] = self.readDataReference()
fields["PreferredDeviceLanguage"] = self.readString()
fields["OSVersion"] = self.readString()
fields["isAndroid"] = self.readBoolean()
fields["IMEI"] = self.readString()
fields["AndroidID"] = self.readString()
fields["isAdvertisingEnabled"] = self.readBoolean()
fields["AppleIFV"] = self.readString()
fields["RndKey"] = self.readInt()
fields["AppStore"] = self.readVInt()
fields["ClientVersion"] = self.readString()
fields["TencentOpenId"] = self.readString()
fields["TencentToken"] = self.readString()
fields["TencentPlatform"] = self.readVInt()
fields["DeviceVerifierResponse"] = self.readString()
fields["AppLicensingSignature"] = self.readString()
fields["DeviceVerifierResponse"] = self.readString()
super().decode(fields)
return fields
def execute(message, calling_instance, fields, cryptoInit):
if fields["ClientMajor"]==53:
calling_instance.player.ClientVersion = f'{str(fields["ClientMajor"])}.{str(fields["ClientBuild"])}.{str(fields["ClientMinor"])}'
fields["Socket"] = calling_instance.client
|
class LoginMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields):
pass
def decode(self):
fields = {}
fields["AccountID"] = self.readLong()
fields["PassToken"] = self.readString()
fields["ClientMajor"] = self.readInt()
fields["ClientMinor"] = self.readInt()
fields["ClientBuild"] = self.readInt()
fields["ResourceSha"] = self.readString()
fields["Device"] = self.readString()
fields["PreferredLanguage"] = self.readDataReference()
fields["PreferredDeviceLanguage"] = self.readString()
fields["OSVersion"] = self.readString()
fields["isAndroid"] = self.readBoolean()
fields["IMEI"] = self.readString()
fields["AndroidID"] = self.readString()
fields["isAdvertisingEnabled"] = self.readBoolean()
fields["AppleIFV"] = self.readString()
fields["RndKey"] = self.readInt()
fields["AppStore"] = self.readVInt()
fields["ClientVersion"] = self.readString()
fields["TencentOpenId"] = self.readString()
fields["TencentToken"] = self.readString()
fields["TencentPlatform"] = self.readVInt()
fields["DeviceVerifierResponse"] = self.readString()
fields["AppLicensingSignature"] = self.readString()
fields["DeviceVerifierResponse"] = self.readString()
super().decode(fields)
return fields
def execute(message, calling_instance, fields, cryptoInit):
if fields["ClientMajor"]==53:
calling_instance.player.ClientVersion = f'{str(fields["ClientMajor"])}.{str(fields["ClientBuild"])}.{str(fields["ClientMinor"])}'
fields["Socket"] = calling_instance.client | db_instance = DatabaseHandler() | 1 | 2023-12-14 18:57:56+00:00 | 4k |
sockheadrps/AIODesa | tests/test_table.py | [
{
"identifier": "ForeignKey",
"path": "aiodesa/utils/table.py",
"snippet": "class ForeignKey(NamedTuple):\n \"\"\"\n Represents a foreign key relationship in a database.\n Args:\n key: The column name representing the foreign key.\n table: The name of the referenced table.\n\n Example:\n\n .. code-block:: python\n\n @set_key(ForeignKey(key='user_id', table='users'))\n\n Note:\n Intended to be consumed by set_key() \\n\n \"\"\"\n\n key: str\n table: str"
},
{
"identifier": "PrimaryKey",
"path": "aiodesa/utils/table.py",
"snippet": "class PrimaryKey(NamedTuple):\n \"\"\"\n Represents primary key columns in a database table.\n\n Args:\n column: Primary key identifer.\n\n Example:\n\n .. code-block:: python\n\n # Define a primary key with the column names 'user_id' and 'post_id':\n @set_key(PrimaryKey('user_id')\n\n\n Note:\n Intended to be consumed by set_key() \\n\n \"\"\"\n\n column: str"
},
{
"identifier": "UniqueKey",
"path": "aiodesa/utils/table.py",
"snippet": "class UniqueKey(NamedTuple):\n \"\"\"\n Represents unique key column in a table.\n\n Args:\n column: column name representing unique key.\n\n Example:\n\n .. code-block:: python\n\n # Define a unique key with the column names 'username' and 'email':\n user_unique_key = UniqueKey('username')\n\n Note:\n Intended to be consumed by set_key() \\n\n \"\"\"\n\n column: str"
},
{
"identifier": "set_key",
"path": "aiodesa/utils/table.py",
"snippet": "def set_key(*args: PrimaryKey | UniqueKey | ForeignKey | tuple[ForeignKey, ...]):\n \"\"\"\n Decorator for setting keys on a class.\n\n Args:\n `*args`: The keys to be set. Can include PrimaryKey, UniqueKey,\n ForeignKey, or a tuple of ForeignKeys.\n\n Returns:\n A decorator function to set keys on a class.\n\n Example:\n\n .. code-block:: python\n\n @dataclass\n @set_key(PrimaryKey(\n \"username\"), UniqueKey(\"id\"), ForeignKey(\"username\", \"anothertable\"\n ))\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n Note:\n Foreign keys can be specified individually or as a tuple.\n \"\"\"\n\n def decorator(cls):\n for arg in args:\n if isinstance(arg, PrimaryKey):\n if not hasattr(cls, \"primary_key\"):\n # cls.primary_key: str = arg.column\n setattr(cls, \"primary_key\", arg.column)\n\n elif isinstance(arg, UniqueKey):\n if not hasattr(cls, \"unique_key\"):\n # cls.unique_key: str = arg.column\n setattr(cls, \"unique_key\", arg.column)\n\n elif isinstance(arg, tuple):\n if not any(\n isinstance(existing_key, (PrimaryKey, UniqueKey))\n for existing_key in getattr(cls, \"foreign_keys\", ())\n ):\n existing_foreign_keys = getattr(cls, \"foreign_keys\", ())\n cls.foreign_keys = existing_foreign_keys + (arg,)\n\n elif isinstance(arg, ForeignKey):\n existing_foreign_keys = getattr(cls, \"foreign_keys\", ())\n cls.foreign_keys = existing_foreign_keys + arg\n\n return cls\n\n return decorator"
},
{
"identifier": "make_schema",
"path": "aiodesa/utils/table.py",
"snippet": "def make_schema(name: str, data_cls: Any) -> TableSchema:\n \"\"\"\n Generate a TableSchema based on the provided data class.\n\n Args:\n name: The name of the table.\n data_cls: A data class defining the schema for the table.\n\n Returns:\n TableSchema: An instance of TableSchema containing the table_name and\n SQL data definition.\n\n Example:\n\n .. code-block:: python\n\n user_table_schema = generate_table_schema(name='users', data_cls=User)\n\n Note:\n The function returns a TableSchema instance containing the table_name\n and SQL data definition.\n \"\"\"\n columns = []\n name = name.replace(\" \", \"_\")\n for field_name, field_type in data_cls.__annotations__.items():\n if field_name == \"table_name\":\n pass\n else:\n columns.append(f\"{field_name} {py_to_sql_type(field_type)}\")\n if hasattr(data_cls, \"primary_key\"):\n columns.append(f\"PRIMARY KEY ({data_cls.primary_key})\")\n if hasattr(data_cls, \"unique_key\"):\n columns.append(f\"UNIQUE ({data_cls.unique_key})\")\n\n schema = TableSchema(\n name, f\"CREATE TABLE IF NOT EXISTS {name} (\\n{', '.join(columns)}\\n);\"\n )\n\n return schema"
}
] | from aiodesa.utils.table import (
ForeignKey,
PrimaryKey,
UniqueKey,
set_key,
make_schema,
)
from dataclasses import dataclass
from uuid import uuid4 | 1,686 |
def test_ForeignKey():
"""
Test the ForeignKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
table = uuid4()
key = uuid4()
foreign_key = ForeignKey(key, table)
assert foreign_key.table == table
assert foreign_key.key == key
def test_PrimaryKey():
"""
Test the PrimaryKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
primary_key = PrimaryKey(column)
assert primary_key.column == column
def test_UniqueKey():
"""
Test the UniqueKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
unique_key = UniqueKey(column)
assert unique_key.column == column
def test_set_key():
"""
Test the behavior of the set_key decorator.
This test checks that the set_key decorator correctly sets primary, unique, and foreign keys
on a class using PrimaryKey, UniqueKey, and ForeignKey attributes.
"""
test_column_1 = uuid4()
test_column_2 = uuid4()
foriegn_key_table = uuid4()
foriegn_key_key = uuid4()
@set_key(
PrimaryKey(test_column_1),
UniqueKey(test_column_2),
ForeignKey(foriegn_key_key, foriegn_key_table),
)
class TestTable:
test_column_1: str | None = None
test_column_2: int | None = None
assert TestTable.primary_key == test_column_1
assert TestTable.unique_key == test_column_2
assert TestTable.foreign_keys[0].table == foriegn_key_table
assert TestTable.foreign_keys[0].key == foriegn_key_key
def test_make_schema():
"""
Tests that the table SQL is generated correctly
"""
    table_name = str(uuid4())  # make_schema expects a string table name
@dataclass
class TestTable:
table_name: str
test_column_1: str = "Test"
table = TestTable(table_name)
|
def test_ForeignKey():
"""
Test the ForeignKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
table = uuid4()
key = uuid4()
foreign_key = ForeignKey(key, table)
assert foreign_key.table == table
assert foreign_key.key == key
def test_PrimaryKey():
"""
Test the PrimaryKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
primary_key = PrimaryKey(column)
assert primary_key.column == column
def test_UniqueKey():
"""
Test the UniqueKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
unique_key = UniqueKey(column)
assert unique_key.column == column
def test_set_key():
"""
Test the behavior of the set_key decorator.
This test checks that the set_key decorator correctly sets primary, unique, and foreign keys
on a class using PrimaryKey, UniqueKey, and ForeignKey attributes.
"""
test_column_1 = uuid4()
test_column_2 = uuid4()
foriegn_key_table = uuid4()
foriegn_key_key = uuid4()
@set_key(
PrimaryKey(test_column_1),
UniqueKey(test_column_2),
ForeignKey(foriegn_key_key, foriegn_key_table),
)
class TestTable:
test_column_1: str | None = None
test_column_2: int | None = None
assert TestTable.primary_key == test_column_1
assert TestTable.unique_key == test_column_2
assert TestTable.foreign_keys[0].table == foriegn_key_table
assert TestTable.foreign_keys[0].key == foriegn_key_key
def test_make_schema():
"""
Tests that the table SQL is generated correctly
"""
    table_name = str(uuid4())  # make_schema expects a string table name
@dataclass
class TestTable:
table_name: str
test_column_1: str = "Test"
table = TestTable(table_name) | schema = make_schema(table_name, table) | 4 | 2023-12-09 05:52:25+00:00 | 4k |
DavidBellamy/labrador | scripts/pretraining/train_labrador.py | [
{
"identifier": "get_dataset",
"path": "lab_transformers/data/read_labrador_tf_records.py",
"snippet": "def get_dataset(\n filenames: List[str],\n batch_size: int,\n pad_token: int,\n random_seed: int,\n shuffle_buffer_size: int,\n) -> tf.data.TFRecordDataset:\n dataset = (\n tf.data.TFRecordDataset(filenames, num_parallel_reads=tf.data.AUTOTUNE)\n .map(parse_tfrecord_fn, num_parallel_calls=tf.data.AUTOTUNE)\n .shuffle(shuffle_buffer_size, seed=random_seed)\n .padded_batch(\n batch_size=batch_size,\n padding_values=(\n {\"categorical_input\": pad_token, \"continuous_input\": float(pad_token)},\n {\"categorical_output\": -1, \"continuous_output\": -1.0},\n ),\n padded_shapes=(\n {\"categorical_input\": [None], \"continuous_input\": [None]},\n {\"categorical_output\": [None], \"continuous_output\": [None]},\n ),\n drop_remainder=True,\n )\n .prefetch(tf.data.AUTOTUNE)\n )\n return dataset"
},
{
"identifier": "CategoricalMLMLoss",
"path": "lab_transformers/models/labrador/loss.py",
"snippet": "class CategoricalMLMLoss(tf.keras.losses.Loss):\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"\n Masked SparseCategoricalCrossentropy for Labrador's categorical prediction head.\n \"\"\"\n super(CategoricalMLMLoss, self).__init__()\n self.scce = SparseCategoricalCrossentropy(\n from_logits=False, reduction=tf.keras.losses.Reduction.NONE\n )\n\n def call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\n y_true_masked = tf.boolean_mask(y_true, tf.not_equal(y_true, -1))\n y_true_masked = tf.subtract(\n y_true_masked, 1\n ) # true labels are 1-indexed, need to be 0-indexed\n y_pred_masked = tf.boolean_mask(y_pred, tf.not_equal(y_true, -1))\n loss = self.scce(y_true_masked, y_pred_masked)\n return loss"
},
{
"identifier": "ContinuousMLMLoss",
"path": "lab_transformers/models/labrador/loss.py",
"snippet": "class ContinuousMLMLoss(tf.keras.losses.Loss):\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"\n Masked MSE for Labrador's continuous prediction head.\n \"\"\"\n super(ContinuousMLMLoss, self).__init__()\n self.mse = MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)\n\n def call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\n y_true_masked = tf.boolean_mask(y_true, tf.not_equal(y_true, -1))\n y_pred_masked = tf.boolean_mask(y_pred, tf.not_equal(y_true, -1))\n\n if tf.size(y_true_masked) == 0 and tf.size(y_pred_masked) == 0:\n return 0\n\n loss = self.mse(y_true_masked, y_pred_masked)\n return loss"
},
{
"identifier": "Labrador",
"path": "lab_transformers/models/labrador/model.py",
"snippet": "class Labrador(keras.Model):\n \"\"\"\n Labrador: a BERT-style transformer model trained on a masked language model objective.\n \"\"\"\n\n def __init__(self, params: Dict[str, Any], **kwargs) -> None:\n super(Labrador, self).__init__()\n\n self._params = params\n self.mask_token = params[\"mask_token\"]\n self.pad_token = params[\"pad_token\"]\n self.null_token = params[\"null_token\"]\n self.vocab_size = params[\"vocab_size\"]\n self.embedding_dim = params[\"embedding_dim\"]\n self.transformer_heads = params[\"transformer_heads\"]\n self.num_blocks = params[\"transformer_blocks\"]\n self.transformer_feedforward_dim = params[\"transformer_feedforward_dim\"]\n self.include_head = params[\"include_head\"]\n self.continuous_head_activation = params[\"continuous_head_activation\"]\n self.dropout_rate = params[\"dropout_rate\"]\n\n self.transformer_activation_fn = layers.ReLU()\n\n if isinstance(self.include_head, str):\n self.include_head = self.include_head.lower() == \"true\"\n elif not isinstance(self.include_head, bool):\n raise ValueError(\n f\"include_head must be a boolean or a string, not {type(self.include_head)}\"\n )\n\n # Note: input_dim = vocab_size + 2 because there are 2 special categorical tokens to embed\n # (mask_token and pad_token).\n self.categorical_embedding_layer = layers.Embedding(\n input_dim=self.vocab_size + 2,\n output_dim=self.embedding_dim,\n mask_zero=True,\n name=\"categorical_embedding\",\n )\n\n self.continuous_embedding_layer = ContinuousEmbedding(\n embedding_dim=self.embedding_dim,\n pad_token=self.pad_token,\n mask_token=self.mask_token,\n null_token=self.null_token,\n name=\"continuous_embedding\",\n )\n\n self.projection_layer = layers.Dense(units=self.embedding_dim)\n\n self.blocks = []\n for block in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n embed_dim=self.embedding_dim,\n num_heads=self.transformer_heads,\n activation=self.transformer_activation_fn,\n feedforward_dim=self.transformer_feedforward_dim,\n first_block=True if block == 0 else False,\n dropout_rate=self.dropout_rate,\n name=f\"transformer_block_{block + 1}\",\n )\n )\n\n self.head = MLMPredictionHead(\n vocab_size=self.vocab_size,\n embedding_dim=self.embedding_dim,\n continuous_head_activation=self.continuous_head_activation,\n )\n\n def call(\n self, inputs: Dict[str, tf.Tensor], training: bool = True, **kwargs\n ) -> Dict[str, tf.Tensor]:\n \"\"\"\n :param inputs: Dict with 2 keys: `categorical_input` and `continuous_input`.\n :param training: Bool indicating whether to run the model with or without dropout.\n :return: Dict of categorical predictions, shape (batch_size, max_bag_length, vocab_size),\n and continuous predictions, shape (batch_size, max_bag_length, 1).\n \"\"\"\n\n categorical_input = inputs[\"categorical_input\"]\n continuous_input = inputs[\"continuous_input\"]\n\n x_categorical = self.categorical_embedding_layer(categorical_input)\n x_continuous = self.continuous_embedding_layer(continuous_input, x_categorical)\n\n x = layers.concatenate([x_categorical, x_continuous])\n x = self.projection_layer(x)\n\n for i in range(self.num_blocks):\n x = self.blocks[i](x, training=training)\n\n if self.include_head:\n x = self.head(x)\n\n return x"
}
] | import os
import os.path as op
import sys
import time
import numpy as np
import tensorflow as tf
import wandb
from tensorflow.keras import mixed_precision
from lab_transformers.data.read_labrador_tf_records import get_dataset
from lab_transformers.models.labrador.loss import CategoricalMLMLoss, ContinuousMLMLoss
from lab_transformers.models.labrador.model import Labrador | 2,413 |
# Parse arguments
random_seed = int(sys.argv[1])
mask_token = int(sys.argv[2])
null_token = int(sys.argv[3])
pad_token = int(sys.argv[4])
vocab_size = int(sys.argv[5])
embed_dim = int(sys.argv[6])
use_wandb = True
# Set configuration
system_config = {
"random_seed": random_seed,
"wandb_project_name": "labrador_pretraining",
"wandb_run_name": "run2",
"use_mixed_precision": False,
}
data_config = {
"tfdata_shuffle_buffer_size": 2_560,
"max_seq_len": 90,
"tfrecords_dir_train": "data_full/labrador_tfrecords_train",
"tfrecords_dir_val": "data_full/labrador_tfrecords_val",
}
time_string = time.strftime("%Y%m%d-%H%M%S")
train_config = {
    "steps_per_epoch": (20_000 * 182) // 256,
    "num_train_epochs": 100,
    "learning_rate": 1e-5,
    "batch_size": 256,
    "model_save_batch_frequency": 14_000,  # save the model every n batches during training
    "model_checkpoint_directory_name": f"labrador_{time_string}",
    "validation_steps": ((20_000 * 27) // 256) // 2,
    "validation_step_frequency": 3_500,  # perform validation every n training batches
}
model_config = {
"mask_token": mask_token,
"null_token": null_token,
"pad_token": pad_token,
"vocab_size": vocab_size,
"embedding_dim": embed_dim,
"transformer_activation": "relu",
"transformer_heads": 4,
"transformer_blocks": 10,
"transformer_feedforward_dim": 1024,
"include_head": True,
"continuous_head_activation": "sigmoid",
"categorical_loss_fn": CategoricalMLMLoss(),
"continuous_loss_fn": ContinuousMLMLoss(),
"loss_weights": {"categorical_output": 1.0, "continuous_output": 1.0},
"dropout_rate": 0.1,
}
config = {
"data_config": data_config,
"train_config": train_config,
"model_config": model_config,
"system_config": system_config,
}
if config["system_config"]["use_mixed_precision"]:
mixed_precision.set_global_policy("mixed_float16")
if use_wandb:
wandb.login(key=os.environ["wandb_key"])
wandb.init(
project=config["system_config"]["wandb_project_name"],
settings=wandb.Settings(start_method="thread"),
config=config,
name=config["system_config"]["wandb_run_name"],
)
# Read TFRecord data
train_filenames = tf.io.gfile.glob(
op.join(config["data_config"]["tfrecords_dir_train"], "*.tfrec")
)
val_filenames = tf.io.gfile.glob(
op.join(config["data_config"]["tfrecords_dir_val"], "*.tfrec")
)
train_dataset = get_dataset(
train_filenames,
config["train_config"]["batch_size"],
pad_token,
random_seed,
config["data_config"]["tfdata_shuffle_buffer_size"],
)
val_dataset = get_dataset(
    val_filenames,
config["train_config"]["batch_size"],
pad_token,
random_seed,
config["data_config"]["tfdata_shuffle_buffer_size"],
)
# Instantiate the transformer model
|
# Parse arguments
random_seed = int(sys.argv[1])
mask_token = int(sys.argv[2])
null_token = int(sys.argv[3])
pad_token = int(sys.argv[4])
vocab_size = int(sys.argv[5])
embed_dim = int(sys.argv[6])
use_wandb = True
# Set configuration
system_config = {
"random_seed": random_seed,
"wandb_project_name": "labrador_pretraining",
"wandb_run_name": "run2",
"use_mixed_precision": False,
}
data_config = {
"tfdata_shuffle_buffer_size": 2_560,
"max_seq_len": 90,
"tfrecords_dir_train": "data_full/labrador_tfrecords_train",
"tfrecords_dir_val": "data_full/labrador_tfrecords_val",
}
time_string = time.strftime("%Y%m%d-%H%M%S")
train_config = {
    "steps_per_epoch": (20_000 * 182) // 256,
    "num_train_epochs": 100,
    "learning_rate": 1e-5,
    "batch_size": 256,
    "model_save_batch_frequency": 14_000,  # save the model every n batches during training
    "model_checkpoint_directory_name": f"labrador_{time_string}",
    "validation_steps": ((20_000 * 27) // 256) // 2,
    "validation_step_frequency": 3_500,  # perform validation every n training batches
}
model_config = {
"mask_token": mask_token,
"null_token": null_token,
"pad_token": pad_token,
"vocab_size": vocab_size,
"embedding_dim": embed_dim,
"transformer_activation": "relu",
"transformer_heads": 4,
"transformer_blocks": 10,
"transformer_feedforward_dim": 1024,
"include_head": True,
"continuous_head_activation": "sigmoid",
"categorical_loss_fn": CategoricalMLMLoss(),
"continuous_loss_fn": ContinuousMLMLoss(),
"loss_weights": {"categorical_output": 1.0, "continuous_output": 1.0},
"dropout_rate": 0.1,
}
config = {
"data_config": data_config,
"train_config": train_config,
"model_config": model_config,
"system_config": system_config,
}
if config["system_config"]["use_mixed_precision"]:
mixed_precision.set_global_policy("mixed_float16")
if use_wandb:
wandb.login(key=os.environ["wandb_key"])
wandb.init(
project=config["system_config"]["wandb_project_name"],
settings=wandb.Settings(start_method="thread"),
config=config,
name=config["system_config"]["wandb_run_name"],
)
# Read TFRecord data
train_filenames = tf.io.gfile.glob(
op.join(config["data_config"]["tfrecords_dir_train"], "*.tfrec")
)
val_filenames = tf.io.gfile.glob(
op.join(config["data_config"]["tfrecords_dir_val"], "*.tfrec")
)
train_dataset = get_dataset(
train_filenames,
config["train_config"]["batch_size"],
pad_token,
random_seed,
config["data_config"]["tfdata_shuffle_buffer_size"],
)
val_dataset = get_dataset(
    val_filenames,
config["train_config"]["batch_size"],
pad_token,
random_seed,
config["data_config"]["tfdata_shuffle_buffer_size"],
)
# Instantiate the transformer model | model = Labrador(config["model_config"]) | 3 | 2023-12-09 20:40:17+00:00 | 4k |
NLP-Core-Team/RealCode_eval | main.py | [
{
"identifier": "InfillGenerator",
"path": "lm_eval/generators.py",
"snippet": "class InfillGenerator:\n def __init__(self, \n model_path: str,\n num_samples: int,\n prefix_tokens: tp.Union[str, tp.List[int]] = [],\n middle_tokens: tp.Union[str, tp.List[int]] = [],\n suffix_tokens: tp.Union[str, tp.List[int]] = [],\n max_context_length: int = None,\n left_context_ratio: int = 1,\n dtype = torch.bfloat16,\n eos_sequences: tp.List[str] = [\"\\sclass\\s\", \"\\sdef\\s\", \"\\s@\", \"<|endoftext|>\", \"<extra_id_0>\"],\n model_kwargs: tp.Dict = {},\n generation_params: tp.Dict[str, tp.Any] = {},\n context_parser: BaseParser = TrivialContextParser(),\n add_extra_spaces_to_generation=0,\n ):\n \"\"\"\n Class to generate code in fill-in-the-middle mode\n params:\n model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained\n num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params\n prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens\n middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens\n suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens\n max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length) \n left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context \n dtype=torch.bfloat16 - torch dtype to use for inference\n eos_sequences: tp.List[str] = [\"\\sclass\\s\", \"\\sdef\\s\", \"\\s@\", \"<|endoftext|>\", \"<extra_id_0>\"] - regular expressions that determine end of geneartion\n model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained\n generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate\n context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts\n add_extra_spaces_to_generation=0 - number of added extra spaces add the begining of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. 
Codellama)\n \"\"\"\n self.device = torch.device(\"cuda\")\n # self.device = torch.device(\"cpu\")\n logger.info(f\"Loading model from {model_path} with kwargs f{model_kwargs}\")\n self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n self.model = AutoModelForCausalLM.from_pretrained(model_path, \n torch_dtype=dtype, device_map=\"auto\", trust_remote_code=True, **model_kwargs\n ).eval() \n logger.info(f\"Loaded model from {model_path} with kwargs f{model_kwargs}\")\n logger.info(f\"Device map: \\n{self.model.hf_device_map}\")\n\n self.num_samples = num_samples\n \n self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)\n self.middle_tokens = self.tokenize_special_tokens(middle_tokens)\n self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)\n\n logger.debug(f\"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}\")\n\n self.eos_sequences = eos_sequences[:]\n\n #context truncation parameters\n self.max_context_length = max_context_length\n self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)\n self.right_context_truncate_at = 1 / (left_context_ratio + 1)\n\n self.generation_params = generation_params\n self.generation_params['num_return_sequences'] = self.num_samples\n\n self.context_parser = context_parser\n # Number of tokens before and after truncating to max_context_length\n self.count_inferenced_tokens = []\n self.count_possible_tokens = []\n self.add_extra_spaces_to_generation = add_extra_spaces_to_generation\n\n def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor: \n if type(str_or_list) == str:\n return self.tokenizer.encode(str_or_list, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n else:\n return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)\n\n def _prepare_tokens(self, task: Task) -> torch.Tensor:\n left_context_str, right_context_str = self.context_parser.get_left_and_right_context(task)\n logger.info(\"\\n\" + \"\\n\".join(left_context_str.split('\\n')[-20:]))\n left_tokens = self.tokenizer.encode(\n left_context_str, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n right_tokens = self.tokenizer.encode(\n right_context_str, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n self.count_possible_tokens.append(left_tokens.shape[1] + right_tokens.shape[1])\n if self.max_context_length and left_tokens.shape[1] + right_tokens.shape[1] > self.max_context_length:\n logger.debug(\"Truncating context\")\n \n left_tokens = left_tokens[:, -min(int(self.max_context_length * self.left_context_truncate_at), left_tokens.shape[1]) + 1:]\n right_tokens = right_tokens[:, :min(int(self.max_context_length * self.right_context_truncate_at), right_tokens.shape[1]) - 1]\n tokens = torch.cat([self.prefix_tokens, left_tokens, self.middle_tokens, right_tokens, self.suffix_tokens], dim=-1).type(torch.long)\n return tokens\n \n def _postprocess(self, generation: str):\n new_gen = []\n for i, line in enumerate(generation.split('\\n')):\n if i == 0 and self.add_extra_spaces_to_generation: \n # ugly hack for codellama, weirdly removing space for skip_special_tokens=True\n line = ' '*self.add_extra_spaces_to_generation + line\n for eos in self.eos_sequences:\n if re.search(eos, line):\n return \"\\n\".join(new_gen).rstrip() + '\\n\\n'\n new_gen.append(line)\n return \"\\n\".join(new_gen).rstrip() + '\\n\\n'\n\n 
@torch.no_grad()\n def generate(self, tasks: tp.List[Task]) -> tp.List[tp.List[str]]:\n res = []\n for i, task in tqdm(enumerate(tasks)):\n tokens = self._prepare_tokens(task)\n if i == 0:\n logger.debug(f\"\\nTokens: {tokens[:, :5]} ... {tokens[:, -5:]}\\n\")\n generated_tokens = self.model.generate(tokens, **self.generation_params)\n generations = self.tokenizer.batch_decode(generated_tokens[:, tokens.shape[1]:], skip_special_tokens=True)\n if i % 1 == 0:\n logger.debug(f\"Generation for task {i}:\\n{self._postprocess(generations[0])}\")\n res.append([self._postprocess(t) for t in generations])\n self.count_inferenced_tokens.append([len(t) for t in tokens])\n return res"
},
{
"identifier": "LMGenerator",
"path": "lm_eval/generators.py",
"snippet": "class LMGenerator(InfillGenerator):\n def __init__(self, \n lm_prefix_tokens: tp.Union[str, tp.List[int]] = [],\n lm_suffix_tokens: tp.Union[str, tp.List[int]] = [],\n **kwargs\n ):\n \"\"\"\n Class to generate code in causal LM mode, uses only left context\n params:\n lm_prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the context. Can be either str or list of int tokens\n lm_suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the context. Can be either str or list of int tokens\n \"\"\"\n super().__init__(**kwargs)\n self.lm_prefix_tokens = super().tokenize_special_tokens(lm_prefix_tokens)\n self.lm_suffix_tokens = super().tokenize_special_tokens(lm_suffix_tokens)\n logger.debug(f\"lm_prefix_tokens: {self.lm_prefix_tokens}, lm_suffix_tokens: {self.lm_suffix_tokens}\")\n\n def _prepare_tokens(self, task: Task) -> torch.Tensor:\n left_context_str, _ = self.context_parser.get_left_and_right_context(task)\n logger.info(\"\\n\" + \"\\n\".join(left_context_str.split('\\n')[-20:]))\n left_tokens = self.tokenizer.encode(\n left_context_str, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n self.count_possible_tokens.append(left_tokens.shape[1])\n if self.max_context_length and left_tokens.shape[1] > self.max_context_length:\n left_tokens = left_tokens[:, -self.max_context_length:]\n tokens = torch.cat([self.lm_prefix_tokens, left_tokens, self.lm_suffix_tokens], dim=-1).type(torch.long)\n return tokens"
},
{
"identifier": "Evaluator",
"path": "lm_eval/evaluator.py",
"snippet": "class Evaluator:\n def __init__(self, \n dataset_root: os.PathLike,\n num_samples: int,\n pass_k_list: tp.List[int] = [1],\n njobs: int = 1,\n working_dir: tp.Optional[os.PathLike] = None,\n metric_aggregations: tp.Dict[str, tp.Callable[[Task], int]] = METRIC_AGGREGATIONS\n ):\n self.metrics = []\n for pass_k in pass_k_list:\n if num_samples < pass_k:\n raise ValueError(f\"num_samples {num_samples} must be greater than or equal to PassK={pass_k}\")\n self.metrics.append(PassK(pass_k, num_samples))\n self.dataset_root = dataset_root\n self.num_samples = num_samples\n self.njobs = njobs\n self.working_dir = working_dir\n self.metric_aggregations = metric_aggregations\n \n def evaluate(self, \n tasks: tp.List[Task],\n generations: tp.List[tp.List[str]],\n ) -> tp.Dict[tp.Literal[\"aggregated\", \"detailed\"], tp.Any]:\n logger.info(f\"Evaluating {len(tasks)} tasks with {self.num_samples} samples on {self.njobs} CPUs\")\n # Run test evaluation\n if self.njobs == 1:\n results = [\n [evaluate_override( self.dataset_root, task, gen, os.path.join(self.working_dir) ) for gen in generations[i]]\n for i, task in enumerate(tasks)\n ]\n else:\n with Manager() as manager:\n cache = manager.dict()\n with manager.Pool(processes=self.njobs) as pool:\n results = [[None for _2 in range(self.num_samples)] for _ in tasks]\n async_result = pool.starmap_async(\n evaluate_override_wrapped, [\n ( self.dataset_root, task, gen, os.path.join(self.working_dir, f\"{j}_{i}\"), j, i, cache )\n for j, task in enumerate(tasks) for i, gen in enumerate(generations[j])\n ]\n )\n res = async_result.get()\n for task_n, gen_n, result in res:\n results[task_n][gen_n] = result\n if task_n % 25 == 0 and gen_n == 0:\n logger.debug(result['output'])\n\n # Calculate metrics per task\n all_metric_names = ['compilation_error_rate', 'exact_match'] + [t.name() for t in self.metrics]\n metrics = []\n agg_metrics = {level: {metric_name: defaultdict(list) for metric_name in all_metric_names} for level in self.metric_aggregations}\n for task, task_results, task_generations in zip(tasks, results, generations):\n if len(task_results) != self.num_samples:\n raise ValueError(f\"Task {task} has {len(task_results)} samples, expected {self.num_samples}\")\n correct = sum([int(t['passed'] == task.total_tests) for t in task_results])\n not_compiles = mean([int(t['passed'] + t['failed'] == 0) for t in task_results])\n exact_match = mean([int(re.sub(r'\\W+', '', task.gt) == re.sub(r'\\W+', '', gen)) for gen in task_generations])\n task_metrics = {'compilation_error_rate': not_compiles, 'exact_match': exact_match}\n for metric in self.metrics:\n task_metrics[metric.name()] = metric(correct)\n task_metrics['evaluations'] = [t['output'] for t in task_results]\n metrics.append(task_metrics)\n for level, level_func in self.metric_aggregations.items():\n for metric in all_metric_names:\n agg_metrics[level][metric][level_func(task)].append(task_metrics[metric])\n \n for level in self.metric_aggregations:\n for metric_name in all_metric_names:\n means = {val: mean(agg_metrics[level][metric_name][val]) for val in agg_metrics[level][metric_name]}\n agg_metrics[level][metric_name] = means\n\n # Save metics\n metrics = agg_metrics | {\n \"detailed\": [asdict(task) | task_metric for task, task_metric in zip(tasks, metrics)]\n }\n return metrics"
},
{
"identifier": "TrivialContextParser",
"path": "lm_eval/context_parser.py",
"snippet": "class TrivialContextParser(BaseParser):\n def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:\n \"\"\"\n returns left and right context without processing\n \"\"\"\n return task.left_context, task.right_context"
},
{
"identifier": "load_dataset",
"path": "lm_eval/utils.py",
"snippet": "def load_dataset(root_path: os.PathLike, meta_file: str = 'dataset.json', limit: int = 10_000) -> List[Task]:\n with open(Path(root_path) / meta_file, 'r') as f:\n dataset = [Task(**t) for t in json.load(f)][:limit]\n return dataset "
}
] | import hydra
import torch
import numpy as np
import random
import json
import os
import logging
from lm_eval.generators import InfillGenerator, LMGenerator
from lm_eval.evaluator import Evaluator
from lm_eval.context_parser import TrivialContextParser
from lm_eval.utils import load_dataset
from omegaconf import DictConfig, OmegaConf | 3,575 |
logger = logging.getLogger("RealCode")
logger.setLevel(logging.DEBUG)
def seed_all(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
seed_all(cfg.seed)
print(cfg)
dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
if 'context_parser' in cfg:
parser = hydra.utils.instantiate(cfg.context_parser)
else:
|
logger = logging.getLogger("RealCode")
logger.setLevel(logging.DEBUG)
def seed_all(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
seed_all(cfg.seed)
print(cfg)
dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
if 'context_parser' in cfg:
parser = hydra.utils.instantiate(cfg.context_parser)
else: | parser = TrivialContextParser() | 3 | 2023-12-12 12:43:06+00:00 | 4k |
centrifugal/grand-chat-tutorial | backend/chat/views.py | [
{
"identifier": "Message",
"path": "backend/chat/models.py",
"snippet": "class Message(models.Model):\n room = models.ForeignKey(Room, related_name='messages', on_delete=models.CASCADE)\n # Note, message may have null user – we consider such messages \"system\". These messages\n # initiated by the backend and have no user author. We are not using such messages in\n # the example currently, but leave the opportunity to extend.\n user = models.ForeignKey(User, related_name='messages', on_delete=models.CASCADE, null=True)\n content = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)"
},
{
"identifier": "Room",
"path": "backend/chat/models.py",
"snippet": "class Room(models.Model):\n name = models.CharField(max_length=100, unique=True)\n version = models.PositiveBigIntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)\n bumped_at = models.DateTimeField(auto_now_add=True)\n last_message = models.ForeignKey(\n 'Message', related_name='last_message_rooms',\n on_delete=models.SET_NULL, null=True, blank=True,\n )\n\n def increment_version(self):\n self.version += 1\n self.save()\n return self.version\n\n def __str__(self):\n return self.name"
},
{
"identifier": "RoomMember",
"path": "backend/chat/models.py",
"snippet": "class RoomMember(models.Model):\n room = models.ForeignKey(Room, related_name='memberships', on_delete=models.CASCADE)\n user = models.ForeignKey(User, related_name='rooms', on_delete=models.CASCADE)\n joined_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = ('room', 'user')\n\n def __str__(self):\n return f\"{self.user.username} in {self.room.name}\""
},
{
"identifier": "Outbox",
"path": "backend/chat/models.py",
"snippet": "class Outbox(models.Model):\n method = models.TextField(default=\"publish\")\n payload = models.JSONField()\n partition = models.BigIntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)"
},
{
"identifier": "CDC",
"path": "backend/chat/models.py",
"snippet": "class CDC(models.Model):\n method = models.TextField(default=\"publish\")\n payload = models.JSONField()\n partition = models.BigIntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)"
},
{
"identifier": "MessageSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class MessageSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n room = MessageRoomSerializer(read_only=True)\n\n class Meta:\n model = Message\n fields = ['id', 'content', 'user', 'room', 'created_at']"
},
{
"identifier": "RoomSearchSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class RoomSearchSerializer(serializers.ModelSerializer):\n\n is_member = serializers.BooleanField(read_only=True)\n \n class Meta:\n model = Room\n fields = ['id', 'name', 'created_at', 'is_member']"
},
{
"identifier": "RoomSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class RoomSerializer(serializers.ModelSerializer):\n member_count = serializers.SerializerMethodField()\n last_message = LastMessageSerializer(read_only=True)\n\n def get_member_count(self, obj):\n return obj.member_count\n\n class Meta:\n model = Room\n fields = ['id', 'name', 'version', 'bumped_at', 'member_count', 'last_message']"
},
{
"identifier": "RoomMemberSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class RoomMemberSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n room = RoomSerializer(read_only=True)\n \n class Meta:\n model = RoomMember\n fields = ['room', 'user']"
}
] | import json
import logging
import requests
from requests.adapters import HTTPAdapter, Retry
from django.conf import settings
from django.db import transaction
from django.db.models import Exists, OuterRef, Count
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status, viewsets
from rest_framework.generics import ListCreateAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .models import Message, Room, RoomMember, Outbox, CDC
from .serializers import MessageSerializer, RoomSearchSerializer, RoomSerializer, RoomMemberSerializer | 1,862 |
class RoomListViewSet(ListModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return Room.objects.annotate(
member_count=Count('memberships__id')
).filter(
memberships__user_id=self.request.user.pk
).select_related('last_message', 'last_message__user').order_by('-bumped_at')
class RoomDetailViewSet(RetrieveModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return Room.objects.annotate(
member_count=Count('memberships')
).filter(memberships__user_id=self.request.user.pk)
class RoomSearchViewSet(viewsets.ModelViewSet):
serializer_class = RoomSearchSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
user = self.request.user
user_membership = RoomMember.objects.filter(
room=OuterRef('pk'),
user=user
)
return Room.objects.annotate(
is_member=Exists(user_membership)
).order_by('name')
class CentrifugoMixin:
# A helper method to return the list of channels for all current members of specific room.
# So that the change in the room may be broadcasted to all the members.
def get_room_member_channels(self, room_id):
members = RoomMember.objects.filter(room_id=room_id).values_list('user', flat=True)
return [f'personal:{user_id}' for user_id in members]
def broadcast_room(self, room_id, broadcast_payload):
# Using Centrifugo HTTP API is the simplest way to send real-time message, and usually
# it provides the best latency. The trade-off here is that error here may result in
# lost real-time event. Depending on the application requirements this may be fine or not.
def broadcast():
session = requests.Session()
retries = Retry(total=1, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retries))
try:
session.post(
settings.CENTRIFUGO_HTTP_API_ENDPOINT + '/api/broadcast',
data=json.dumps(broadcast_payload),
headers={
'Content-type': 'application/json',
'X-API-Key': settings.CENTRIFUGO_HTTP_API_KEY,
'X-Centrifugo-Error-Mode': 'transport'
}
)
except requests.exceptions.RequestException as e:
logging.error(e)
if settings.CENTRIFUGO_BROADCAST_MODE == 'api':
# We need to use on_commit here to not send notification to Centrifugo before
# changes applied to the database. Since we are inside transaction.atomic block
# broadcast will happen only after successful transaction commit.
transaction.on_commit(broadcast)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'outbox':
# In outbox case we can set partition for parallel processing, but
# it must be in predefined range and match Centrifugo PostgreSQL
# consumer configuration.
partition = hash(room_id)%settings.CENTRIFUGO_OUTBOX_PARTITIONS
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly.
Outbox.objects.create(method='broadcast', payload=broadcast_payload, partition=partition)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'cdc':
# In cdc case Debezium will use this field for setting Kafka partition.
# We should not prepare proper partition ourselves in this case.
partition = hash(room_id)
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly. In this
# app Debezium will perform CDC and send outbox events to Kafka, event will be then
# consumed by Centrifugo. The advantages here is that Debezium reads WAL changes and
# has a negligible overhead on database performance. And most efficient partitioning.
# The trade-off is that more hops add more real-time event delivery latency. May be
# still instant enough though.
|
class RoomListViewSet(ListModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return Room.objects.annotate(
member_count=Count('memberships__id')
).filter(
memberships__user_id=self.request.user.pk
).select_related('last_message', 'last_message__user').order_by('-bumped_at')
class RoomDetailViewSet(RetrieveModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return Room.objects.annotate(
member_count=Count('memberships')
).filter(memberships__user_id=self.request.user.pk)
class RoomSearchViewSet(viewsets.ModelViewSet):
serializer_class = RoomSearchSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
user = self.request.user
user_membership = RoomMember.objects.filter(
room=OuterRef('pk'),
user=user
)
return Room.objects.annotate(
is_member=Exists(user_membership)
).order_by('name')
class CentrifugoMixin:
# A helper method to return the list of channels for all current members of specific room.
# So that the change in the room may be broadcasted to all the members.
def get_room_member_channels(self, room_id):
members = RoomMember.objects.filter(room_id=room_id).values_list('user', flat=True)
return [f'personal:{user_id}' for user_id in members]
def broadcast_room(self, room_id, broadcast_payload):
# Using Centrifugo HTTP API is the simplest way to send real-time message, and usually
# it provides the best latency. The trade-off here is that error here may result in
# lost real-time event. Depending on the application requirements this may be fine or not.
def broadcast():
session = requests.Session()
retries = Retry(total=1, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retries))
try:
session.post(
settings.CENTRIFUGO_HTTP_API_ENDPOINT + '/api/broadcast',
data=json.dumps(broadcast_payload),
headers={
'Content-type': 'application/json',
'X-API-Key': settings.CENTRIFUGO_HTTP_API_KEY,
'X-Centrifugo-Error-Mode': 'transport'
}
)
except requests.exceptions.RequestException as e:
logging.error(e)
if settings.CENTRIFUGO_BROADCAST_MODE == 'api':
# We need to use on_commit here to not send notification to Centrifugo before
# changes applied to the database. Since we are inside transaction.atomic block
# broadcast will happen only after successful transaction commit.
transaction.on_commit(broadcast)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'outbox':
# In outbox case we can set partition for parallel processing, but
# it must be in predefined range and match Centrifugo PostgreSQL
# consumer configuration.
partition = hash(room_id)%settings.CENTRIFUGO_OUTBOX_PARTITIONS
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly.
Outbox.objects.create(method='broadcast', payload=broadcast_payload, partition=partition)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'cdc':
# In cdc case Debezium will use this field for setting Kafka partition.
# We should not prepare proper partition ourselves in this case.
partition = hash(room_id)
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly. In this
# app Debezium will perform CDC and send outbox events to Kafka, event will be then
# consumed by Centrifugo. The advantages here is that Debezium reads WAL changes and
# has a negligible overhead on database performance. And most efficient partitioning.
# The trade-off is that more hops add more real-time event delivery latency. May be
# still instant enough though. | CDC.objects.create(method='broadcast', payload=broadcast_payload, partition=partition) | 4 | 2023-12-06 10:13:26+00:00 | 4k |
HACHIX-CORPORATION/LEDGO | zoomable_graphics_view.py | [
{
"identifier": "get_width_height_from_text",
"path": "utils.py",
"snippet": "def get_width_height_from_text(text, font_size, font_family):\n \"\"\"get Rectangle from text and font size\n\n Args:\n text (str): plain text\n font_size (int): font size\n font_family (str): font family\n\n Returns:\n width (int): Returns the width of text\n height (int): Returns the height of text\n \"\"\"\n if font_family is not None:\n font = QFont(font_family)\n else:\n font = QFont()\n font.setPixelSize(font_size)\n\n font.setPixelSize(font_size)\n metrics = QFontMetrics(font)\n rect = metrics.boundingRect(QRect(0,0,0,0), Qt.AlignTop, text)\n height = rect.height()\n width = rect.width()\n return width, height"
},
{
"identifier": "get_max_font_size",
"path": "utils.py",
"snippet": "def get_max_font_size(text, max_width, max_height, font_family):\n \"\"\"get max font size\n\n Args:\n text (str): text\n max_width (int): width\n max_height (int): height\n font_family (str): font family\n\n Returns:\n font_size (int): font size\n \"\"\"\n font_size = 1\n while True:\n text_width, text_height = get_width_height_from_text(text, font_size + 1, font_family)\n if not (text_width <= max_width and text_height <= max_height and font_size + 1 <= max_height):\n break\n font_size += 1\n\n return font_size"
},
{
"identifier": "get_font_family",
"path": "utils.py",
"snippet": "def get_font_family():\n \"\"\"Register font family\n\n Returns:\n font_family (str): font family\n \"\"\"\n font_file = 'HiraginoKakuGothicProW3.otf'\n font_path = os.path.join(os.path.dirname(__file__), 'font', font_file)\n font_id = QFontDatabase.addApplicationFont(font_path)\n font_family = None\n if font_id != -1:\n font_family = QFontDatabase.applicationFontFamilies(font_id)[0]\n return font_family"
}
] | import math
import numpy as np
import os
from PySide6.QtWidgets import QGraphicsView, QGraphicsRectItem, QInputDialog, \
QGraphicsItem, QMessageBox, QTextEdit, QHBoxLayout, QLabel, QPushButton
from PySide6.QtGui import QPainter, QFont, QBrush, QImage, QColor, QFontMetrics, QPen, QTransform, QFontDatabase
from PySide6.QtCore import Qt,QPointF, QRectF, QRect
from PySide6.QtWidgets import QDialog, QLineEdit, QFormLayout, QDialogButtonBox, QDoubleSpinBox, QVBoxLayout
from utils import get_width_height_from_text, get_max_font_size, get_font_family | 2,850 | return self.textLineEdit.toPlainText(), self.font_spinbox.value(), self.scaleVSpinBox.value(), self.scaleHSpinBox.value(), self.rotationSpinBox.value()
def handle_click_button(self, button):
color = button.palette().color(button.backgroundRole())
print(color)
print("Property: ",button.property("selected"))
if not button.property("selected"):
button.setStyleSheet(
f"background-color: {color.name()}; border: 4px solid green; border-radius: 5px;") # Add rounded corners and border
button.setProperty("selected", True)
else:
button.setStyleSheet(
f"background-color: {color.name()}; border: 1px solid black; border-radius: 5px;") # Add rounded corners
button.setProperty("selected", False)
for other_button in self.list_button_color:
if other_button != button and other_button.property("selected"):
other_button.setStyleSheet(
f"background-color: {other_button.palette().color(other_button.backgroundRole()).name()}; border: 1px solid black; border-radius: 5px;") # Add rounded corners
other_button.setProperty("selected", False)
self.selected_text_color = color.name()
def on_click_button_color(self, button):
return lambda: self.handle_click_button(button)
class InputTextDialog(QDialog):
def __init__(self, parent=None, default_text="", font_size=MIN_FONT_SIZE, topleft_pos = (0,0)):
super(InputTextDialog, self).__init__(parent)
self.zoomable = parent
self.topleft_pos = topleft_pos
self.setWindowTitle(self.zoomable.parent.data["input"]["Enter_text"].get(self.zoomable.parent.language_code))
self.initUI(default_text, font_size)
def initUI(self, default_text, font_size):
# Set up layout
vbox = QVBoxLayout()
# Create form layout to add fields
formLayout = QFormLayout()
# Text input field
self.textLineEdit = QTextEdit()
self.textLineEdit.setPlainText(default_text)
formLayout.addRow(self.zoomable.parent.data["input"]["Text"].get(self.zoomable.parent.language_code), self.textLineEdit)
# Scale input field
self.font_spinbox = QDoubleSpinBox()
self.font_spinbox.setRange(0, self.zoomable.parent.num_row)
self.font_spinbox.setValue(font_size)
formLayout.addRow(self.zoomable.parent.data["input"]["Font_size"].get(self.zoomable.parent.language_code), self.font_spinbox)
vbox.addLayout(formLayout)
# OK and Cancel buttons
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
# Connect the QDialogButtonBox's rejected event to the QDialog's reject method
buttons.rejected.connect(self.reject)
# Add QDialogButtonBox to the layout
vbox.addWidget(buttons)
# Access the "Ok" and "Cancel" buttons and set text for them
ok_button = buttons.button(QDialogButtonBox.Ok)
cancel_button = buttons.button(QDialogButtonBox.Cancel)
ok_button.setText(self.zoomable.parent.data["message"]["Yes"].get(self.zoomable.parent.language_code))
cancel_button.setText(self.zoomable.parent.data["message"]["Cancel"].get(self.zoomable.parent.language_code))
ok_button.clicked.connect(self.on_accept)
cancel_button.clicked.connect(self.reject)
self.setLayout(vbox)
def on_accept(self):
text = self.textLineEdit.toPlainText()
dot_size = self.zoomable.parent.dot_size
x_first, y_first = self.topleft_pos
x_coord, y_coord = math.ceil(x_first)/dot_size, math.ceil(y_first)/dot_size
# check height width oversize
width, height = get_width_height_from_text(text, self.font_spinbox.value(), self.zoomable.font_family)
max_height = self.zoomable.parent.num_row
max_width = self.zoomable.parent.num_col
if y_coord + height > max_height or x_coord + width > max_width:
font_size = get_max_font_size(text, max_width - x_coord, max_height - y_coord, self.zoomable.font_family)
self.font_spinbox.setValue(font_size)
self.accept()
def getInputs(self):
print("New font:", self.font_spinbox.value())
return self.textLineEdit.toPlainText(), self.font_spinbox.value()
class CustomRectItem(QGraphicsRectItem):
def __init__(self, rect, graphics_view, *args, **kwargs):
super().__init__(rect, *args, **kwargs) # rect is the rectangle dimensions
self.graphics_view = graphics_view
def mouseReleaseEvent(self, event):
super().mouseReleaseEvent(event)
print(f"Mouse released after move customRect: {event}")
self.graphics_view.handle_custom_rect_item_released()
class ZoomableGraphicsView(QGraphicsView):
def __init__(self, scene, parent=None):
super().__init__(scene)
self.parent = parent
self.setRenderHint(QPainter.Antialiasing)
self.setDragMode(QGraphicsView.NoDrag)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)
self.zoom_factor_base = 1.25 # or any desired zoom factor
self.dot_size = 2
self.last_known_rubberband_rect = 0
self.layer_color = QColor.fromRgbF(0.313726, 0.313726, 0.313726, 1.000000)
self.transparent_color = QColor(0, 0, 0, 0)
self.num_col = self.parent.num_col
self.num_row = self.parent.num_row
self.text_color = None
self.is_selected = False
self.zoom_factor = 1
|
DEFAULT_VALUE_OF_ITEM = None # Default value item GraphicsScene for layer >0
MIN_FONT_SIZE = 8 # min size text of paint is 8
DEFAULT_FONT_SIZE = 16
class MultiInputDialogue(QDialog):
def __init__(self, parent=None, default_text="", default_scale_v=1.0,
default_scale_h=1.0, default_rotation=0.0, font_size=None, text_color=None):
super(MultiInputDialogue, self).__init__(parent)
self.zoomable = parent
self.selected_text_color = text_color
self.initUI(default_text, default_scale_v, default_scale_h, default_rotation, font_size)
if self.zoomable.parent.language_code == "eng":
self.setWindowTitle("Edit")
else:
self.setWindowTitle("編集")
def initUI(self, default_text, default_scale_v, default_scale_h, default_rotation, font_size=None):
# Set up layout
vbox = QVBoxLayout()
# Create form layout to add fields
formLayout = QFormLayout()
# Text input field
self.textLineEdit = QTextEdit()
self.textLineEdit.setPlainText(default_text)
formLayout.addRow(self.zoomable.parent.data["input"]["Text"].get(self.zoomable.parent.language_code), self.textLineEdit)
# Font size
self.font_spinbox = QDoubleSpinBox()
self.font_spinbox.setRange(MIN_FONT_SIZE, 384.0)
self.font_spinbox.setSingleStep(0.1)
if font_size is not None:
self.font_spinbox.setValue(font_size)
formLayout.addRow(self.zoomable.parent.data["input"]["Font_size"].get(self.zoomable.parent.language_code), self.font_spinbox)
# Scale input field
self.scaleVSpinBox = QDoubleSpinBox()
self.scaleVSpinBox.setRange(0.1, 100)
self.scaleVSpinBox.setSingleStep(0.1)
self.scaleVSpinBox.setValue(default_scale_v)
formLayout.addRow(self.zoomable.parent.data["input"]["Scale_v"].get(self.zoomable.parent.language_code), self.scaleVSpinBox)
self.scaleHSpinBox = QDoubleSpinBox()
self.scaleHSpinBox.setRange(0.1, 100)
self.scaleHSpinBox.setValue(default_scale_h)
self.scaleHSpinBox.setSingleStep(0.1)
formLayout.addRow(self.zoomable.parent.data["input"]["Scale_h"].get(self.zoomable.parent.language_code), self.scaleHSpinBox)
# Rotation input field
self.rotationSpinBox = QDoubleSpinBox()
self.rotationSpinBox.setRange(-360.0, 360.0)
self.rotationSpinBox.setValue(default_rotation)
self.rotationSpinBox.setSingleStep(0.1)
formLayout.addRow(self.zoomable.parent.data["input"]["Rotation"].get(self.zoomable.parent.language_code), self.rotationSpinBox)
# Select color
if self.selected_text_color is not None:
self.select_color = QHBoxLayout()
self.list_button_color = []
for color in self.zoomable.parent.colors:
button = QPushButton()
if color.lower() == self.selected_text_color:
button.setStyleSheet(
f"background-color: {color}; border: 4px solid green; border-radius: 5px;") # Add rounded corners and border
button.setProperty("selected", True)
else:
button.setStyleSheet(f"background-color: {color}; border: 1px solid black; border-radius: 5px;")
button.setProperty("selected", False)
button.clicked.connect(self.on_click_button_color(button))
self.list_button_color.append(button)
self.select_color.addWidget(button)
formLayout.addRow(self.zoomable.parent.data["label"]["topbar_widget.select_color_label"].get(self.zoomable.parent.language_code), self.select_color)
# end select color
vbox.addLayout(formLayout)
# OK and Cancel buttons
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
# Add QDialogButtonBox to layout
vbox.addWidget(buttons)
# Access the "Ok" and "Cancel" buttons and set text for them
ok_button = buttons.button(QDialogButtonBox.Ok)
cancel_button = buttons.button(QDialogButtonBox.Cancel)
ok_button.setText(self.zoomable.parent.data["message"]["Yes"].get(self.zoomable.parent.language_code))
cancel_button.setText(self.zoomable.parent.data["message"]["Cancel"].get(self.zoomable.parent.language_code))
ok_button.clicked.connect(self.accept)
cancel_button.clicked.connect(self.reject)
self.setLayout(vbox)
def getInputs(self):
return self.textLineEdit.toPlainText(), self.font_spinbox.value(), self.scaleVSpinBox.value(), self.scaleHSpinBox.value(), self.rotationSpinBox.value()
def handle_click_button(self, button):
color = button.palette().color(button.backgroundRole())
print(color)
print("Property: ",button.property("selected"))
if not button.property("selected"):
button.setStyleSheet(
f"background-color: {color.name()}; border: 4px solid green; border-radius: 5px;") # Add rounded corners and border
button.setProperty("selected", True)
else:
button.setStyleSheet(
f"background-color: {color.name()}; border: 1px solid black; border-radius: 5px;") # Add rounded corners
button.setProperty("selected", False)
for other_button in self.list_button_color:
if other_button != button and other_button.property("selected"):
other_button.setStyleSheet(
f"background-color: {other_button.palette().color(other_button.backgroundRole()).name()}; border: 1px solid black; border-radius: 5px;") # Add rounded corners
other_button.setProperty("selected", False)
self.selected_text_color = color.name()
def on_click_button_color(self, button):
return lambda: self.handle_click_button(button)
class InputTextDialog(QDialog):
def __init__(self, parent=None, default_text="", font_size=MIN_FONT_SIZE, topleft_pos = (0,0)):
super(InputTextDialog, self).__init__(parent)
self.zoomable = parent
self.topleft_pos = topleft_pos
self.setWindowTitle(self.zoomable.parent.data["input"]["Enter_text"].get(self.zoomable.parent.language_code))
self.initUI(default_text, font_size)
def initUI(self, default_text, font_size):
# Set up layout
vbox = QVBoxLayout()
# Create form layout to add fields
formLayout = QFormLayout()
# Text input field
self.textLineEdit = QTextEdit()
self.textLineEdit.setPlainText(default_text)
formLayout.addRow(self.zoomable.parent.data["input"]["Text"].get(self.zoomable.parent.language_code), self.textLineEdit)
# Scale input field
self.font_spinbox = QDoubleSpinBox()
self.font_spinbox.setRange(0, self.zoomable.parent.num_row)
self.font_spinbox.setValue(font_size)
formLayout.addRow(self.zoomable.parent.data["input"]["Font_size"].get(self.zoomable.parent.language_code), self.font_spinbox)
vbox.addLayout(formLayout)
# OK and Cancel buttons
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
# Connect the QDialogButtonBox's rejected event to the QDialog's reject method
buttons.rejected.connect(self.reject)
# Add QDialogButtonBox to the layout
vbox.addWidget(buttons)
# Access the "Ok" and "Cancel" buttons and set text for them
ok_button = buttons.button(QDialogButtonBox.Ok)
cancel_button = buttons.button(QDialogButtonBox.Cancel)
ok_button.setText(self.zoomable.parent.data["message"]["Yes"].get(self.zoomable.parent.language_code))
cancel_button.setText(self.zoomable.parent.data["message"]["Cancel"].get(self.zoomable.parent.language_code))
ok_button.clicked.connect(self.on_accept)
cancel_button.clicked.connect(self.reject)
self.setLayout(vbox)
def on_accept(self):
text = self.textLineEdit.toPlainText()
dot_size = self.zoomable.parent.dot_size
x_first, y_first = self.topleft_pos
x_coord, y_coord = math.ceil(x_first)/dot_size, math.ceil(y_first)/dot_size
# check height width oversize
width, height = get_width_height_from_text(text, self.font_spinbox.value(), self.zoomable.font_family)
max_height = self.zoomable.parent.num_row
max_width = self.zoomable.parent.num_col
if y_coord + height > max_height or x_coord + width > max_width:
font_size = get_max_font_size(text, max_width - x_coord, max_height - y_coord, self.zoomable.font_family)
self.font_spinbox.setValue(font_size)
self.accept()
def getInputs(self):
print("New font:", self.font_spinbox.value())
return self.textLineEdit.toPlainText(), self.font_spinbox.value()
class CustomRectItem(QGraphicsRectItem):
def __init__(self, rect, graphics_view, *args, **kwargs):
super().__init__(rect, *args, **kwargs) # rect is the rectangle dimensions
self.graphics_view = graphics_view
def mouseReleaseEvent(self, event):
super().mouseReleaseEvent(event)
print(f"Mouse released after move customRect: {event}")
self.graphics_view.handle_custom_rect_item_released()
class ZoomableGraphicsView(QGraphicsView):
def __init__(self, scene, parent=None):
super().__init__(scene)
self.parent = parent
self.setRenderHint(QPainter.Antialiasing)
self.setDragMode(QGraphicsView.NoDrag)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)
self.zoom_factor_base = 1.25 # or any desired zoom factor
self.dot_size = 2
self.last_known_rubberband_rect = 0
self.layer_color = QColor.fromRgbF(0.313726, 0.313726, 0.313726, 1.000000)
self.transparent_color = QColor(0, 0, 0, 0)
self.num_col = self.parent.num_col
self.num_row = self.parent.num_row
self.text_color = None
self.is_selected = False
self.zoom_factor = 1 | self.font_family = get_font_family() | 2 | 2023-12-08 04:05:16+00:00 | 4k |
shinkungoo/SymbolicCDM | SCDM/model.py | [
{
"identifier": "StudentDataSet",
"path": "SCDM/utility.py",
"snippet": "class StudentDataSet(Dataset):\n def __init__(self, loaded_data):\n \"\"\"\n This class is designed for transforming loaded_data from np.ndarray to Dataset.\n \"\"\"\n self.data = loaded_data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]"
},
{
"identifier": "print_logs",
"path": "SCDM/utility.py",
"snippet": "def print_logs(metric, headers, title):\n print(title)\n df_string = DataFrame(data=[metric], columns=headers).to_string(index=False)\n print(\"-\" * (len(df_string) // 2))\n print(df_string)\n print(\"-\" * (len(df_string) // 2))"
},
{
"identifier": "transform",
"path": "SCDM/utility.py",
"snippet": "def transform(student_id, question, y, q_matrix=None):\n \"\"\"\n Transform data to match the input of parameter optimization\n\n :return: torch.DataLoader(batch_size=32)\n \"\"\"\n if q_matrix is None:\n dataset = TensorDataset(torch.tensor(student_id, dtype=torch.int64) - 1,\n torch.tensor(question, dtype=torch.int64) - 1,\n torch.tensor(y, dtype=torch.float32))\n else:\n q_matrix_line = q_matrix[question - 1]\n dataset = TensorDataset(torch.tensor(student_id, dtype=torch.int64) - 1,\n torch.tensor(question, dtype=torch.int64) - 1,\n q_matrix_line,\n torch.tensor(y, dtype=torch.float32))\n return DataLoader(dataset, batch_size=32)"
},
{
"identifier": "GeneticInteractionFunc",
"path": "SCDM/interaction.py",
"snippet": "class GeneticInteractionFunc:\n def __init__(self, train_set, train_size):\n # genetic programming and algorithm init\n creator.create(\"fitness_if\", base.Fitness, weights=(1.0, 1.0))\n creator.create(\"individual\", gp.PrimitiveTree, fitness=creator.fitness_if)\n\n self.train_size = train_size\n\n self.proficiency = None\n self.difficulty = None\n self.discrimination = None\n\n self.train_set = train_set\n self.interaction = InteractionFunc(train_set)\n\n self.interaction_funcs = []\n self.interaction_funcs_string = []\n\n def __str__(self):\n if len(self.interaction_funcs) != 0:\n return self.interaction_funcs_string[0]\n else:\n return \"default\"\n\n def evaluation(self, test_data) -> tuple:\n current_interaction_func = self.function()\n prediction, truth = exam(test_data,\n self.proficiency,\n self.difficulty,\n self.discrimination,\n current_interaction_func,)\n\n acc = accuracy(prediction, truth)\n auc = area_under_curve(prediction, truth)\n f1 = f1_score(prediction, truth)\n\n return acc, auc, f1,\n\n def train(self):\n print(\"Genetic programming search\")\n interaction_funcs = []\n interaction_funcs_string = []\n self.interaction.train()\n interaction_funcs.append(self.interaction.unpack(is_compiled=True))\n interaction_funcs_string.append(str(self.interaction.unpack()))\n self.interaction_funcs = interaction_funcs\n self.interaction_funcs_string = interaction_funcs_string\n print(\"Final Function:\", str(self))\n\n def function(self):\n if len(self.interaction_funcs) != 0:\n def final_function(discrimination, proficiency_level, q_matrix):\n return self.interaction_funcs[0](discrimination, proficiency_level, q_matrix)\n return final_function\n else:\n return init_interaction_function\n\n def update(self, proficiency, difficulty, discrimination):\n self.proficiency = proficiency.copy()\n self.difficulty = difficulty.copy()\n self.discrimination = discrimination.copy()\n self.interaction.update(proficiency, difficulty, discrimination)"
},
{
"identifier": "Parameter",
"path": "SCDM/parameter.py",
"snippet": "class Parameter:\n def __init__(self,\n student_number: int,\n question_number: int,\n knowledge_number: int,):\n self.net = ComputeIF(student_number, question_number, knowledge_number)\n self.student_number = student_number\n self.question_number = question_number\n self.knowledge_number = knowledge_number\n self.interaction_function = init_interaction_function\n self.interaction_function_string = \"initial interaction function\"\n\n def train(self, train_set, epochs, device=\"cpu\", lr=0.002, init=True):\n # initialize\n if init:\n for name, param in self.net.named_parameters():\n if \"weight\" in name:\n nn.init.xavier_normal_(param)\n self.net = self.net.to(device)\n self.net.train()\n loss_function = nn.BCELoss()\n optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)\n with tqdm(total=epochs, desc=\"Training Process\", unit=\"epoch\") as pbar:\n for epoch in range(epochs):\n epoch_losses = []\n for batch_data in train_set:\n student_id, question, q_matrix_line, y = batch_data\n student_id: torch.Tensor = student_id.to(device)\n question: torch.Tensor = question.to(device)\n q_matrix_line: torch.Tensor = q_matrix_line.to(device)\n y: torch.Tensor = y.to(device)\n pred: torch.Tensor = self.net(student_id,\n question,\n q_matrix_line,\n self.interaction_function)\n loss = loss_function(pred, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n epoch_losses.append(loss.mean().item())\n pbar.update()\n\n def evaluate(self, test_set, interaction_func, device=\"cpu\"):\n self.net = self.net.to(device)\n self.net.eval()\n y_true, y_pred = [], []\n for batch_data in test_set:\n student_id, question, q_matrix_line, y = batch_data\n student_id: torch.Tensor = student_id.to(device)\n question: torch.Tensor = question.to(device)\n q_matrix_line: torch.Tensor = q_matrix_line.to(device)\n pred: torch.Tensor = self.net(student_id,\n question,\n q_matrix_line,\n interaction_func)\n y_pred.extend(pred.detach().cpu().tolist())\n y_true.extend(y.tolist())\n\n acc = accuracy(y_pred, y_true)\n auc = area_under_curve(y_pred, y_true)\n f1 = f1_score(y_pred, y_true)\n return acc, auc, f1,\n\n def unpack(self):\n proficiency_level = self.net.student_emb(torch.arange(0, self.student_number).to()).detach().cpu().numpy()\n difficulty = self.net.difficulty(torch.arange(0, self.question_number).to()).detach().cpu().numpy()\n discrimination = self.net.discrimination(torch.arange(0, self.question_number).to()).detach().cpu().numpy()\n return proficiency_level, difficulty, discrimination,\n\n def update(self, interaction_func, interaction_func_str):\n self.interaction_function = interaction_func\n self.interaction_function_string = interaction_func_str"
},
{
"identifier": "degree_of_agreement",
"path": "SCDM/eval.py",
"snippet": "def degree_of_agreement(q_matrix, proficiency, dataset):\n problem_number, knowledge_number = q_matrix.shape\n student_number = proficiency.shape[0]\n r_matrix = np.full((student_number, problem_number), -1)\n for lines in dataset:\n student_id_batch, question_batch, _, y_batch = lines\n for student_id, question, y in zip(student_id_batch, question_batch, y_batch ):\n r_matrix[student_id][question] = y\n doaList = []\n for k in range(knowledge_number):\n numerator = 0.0\n denominator = 0.0\n delta_matrix = proficiency[:, k].reshape(-1, 1) > proficiency[:, k].reshape(1, -1)\n question_hask = np.where(q_matrix[:, k] != 0)[0].tolist()\n for j in question_hask:\n # avoid blank logs\n row_vec = (r_matrix[:, j].reshape(1, -1) != -1).astype(int)\n column_vec = (r_matrix[:, j].reshape(-1, 1) != -1).astype(int)\n mask = row_vec * column_vec\n delta_response_logs = r_matrix[:, j].reshape(-1, 1) > r_matrix[:, j].reshape(1, -1)\n i_matrix = r_matrix[:, j].reshape(-1, 1) != r_matrix[:, j].reshape(1, -1)\n numerator += np.sum(delta_matrix * np.logical_and(mask, delta_response_logs))\n denominator += np.sum(delta_matrix * np.logical_and(mask, i_matrix))\n doaList.append(numerator / denominator)\n\n return np.mean(doaList)"
}
] | import warnings
import numpy as np
import torch
import pprint
from torch.utils.data import random_split
from .utility import StudentDataSet, print_logs, transform
from .interaction import GeneticInteractionFunc
from .parameter import Parameter
from .eval import degree_of_agreement | 2,308 |
class SymbolicCDM:
def __init__(self,
q_matrix: np.ndarray,
student_number: int,
question_number: int,
knowledge_number: int,
response_logs: np.ndarray,
device="cpu"):
# dataset split
response_logs = StudentDataSet(response_logs)
# organize dataset
train_size = int(len(response_logs) * 0.75)
valid_size = len(response_logs) - train_size
train_set, valid_set = random_split(response_logs, [train_size, valid_size])
train_set = np.array(train_set)
valid_set = np.array(valid_set)
self.train_set = transform(train_set[:, 0], train_set[:, 1], train_set[:, 2], torch.Tensor(q_matrix))
self.train_size = train_size
self.valid_set = transform(valid_set[:, 0], valid_set[:, 1], valid_set[:, 2], torch.Tensor(q_matrix))
self.interaction = GeneticInteractionFunc(self.train_set, train_size)
|
class SymbolicCDM:
def __init__(self,
q_matrix: np.ndarray,
student_number: int,
question_number: int,
knowledge_number: int,
response_logs: np.ndarray,
device="cpu"):
# dataset split
response_logs = StudentDataSet(response_logs)
# organize dataset
train_size = int(len(response_logs) * 0.75)
valid_size = len(response_logs) - train_size
train_set, valid_set = random_split(response_logs, [train_size, valid_size])
train_set = np.array(train_set)
valid_set = np.array(valid_set)
self.train_set = transform(train_set[:, 0], train_set[:, 1], train_set[:, 2], torch.Tensor(q_matrix))
self.train_size = train_size
self.valid_set = transform(valid_set[:, 0], valid_set[:, 1], valid_set[:, 2], torch.Tensor(q_matrix))
self.interaction = GeneticInteractionFunc(self.train_set, train_size) | self.parameter = Parameter(student_number, | 4 | 2023-12-09 13:37:15+00:00 | 4k |
pan-x-c/EE-LLM | megatron/global_vars.py | [
{
"identifier": "dist_signal_handler",
"path": "megatron/dist_signal_handler.py",
"snippet": "def get_world_size():\ndef get_device(local_rank=None):\ndef all_gather_item(item, dtype, group=None, async_op=False, local_rank=None):\n def __init__(self, sig=signal.SIGTERM):\n def signals_received(self):\n def __enter__(self):\n def handler(signum, frame):\n def __exit__(self, type, value, tb):\n def release(self):\nclass DistributedSignalHandler:"
},
{
"identifier": "build_tokenizer",
"path": "megatron/tokenizer/tokenizer.py",
"snippet": "def build_tokenizer(args):\n \"\"\"Initialize tokenizer.\"\"\"\n if args.rank == 0:\n print('> building {} tokenizer ...'.format(args.tokenizer_type),\n flush=True)\n\n # Select and instantiate the tokenizer.\n if args.tokenizer_type == 'BertWordPieceLowerCase':\n assert args.vocab_file is not None\n tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file,\n lower_case=True,\n vocab_extra_ids=args.vocab_extra_ids)\n elif args.tokenizer_type == 'BertWordPieceCase':\n assert args.vocab_file is not None\n tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file,\n lower_case=False,\n vocab_extra_ids=args.vocab_extra_ids)\n elif args.tokenizer_type == 'GPT2BPETokenizer':\n assert args.vocab_file is not None\n assert args.merge_file is not None\n tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)\n elif args.tokenizer_type == 'SentencePieceTokenizer':\n assert args.tokenizer_model is not None\n tokenizer = _SentencePieceTokenizer(args.tokenizer_model, vocab_extra_ids=args.vocab_extra_ids)\n elif args.tokenizer_type == 'GPTSentencePieceTokenizer':\n assert args.tokenizer_model is not None\n tokenizer = _GPTSentencePieceTokenizer(args.tokenizer_model)\n elif args.tokenizer_type == 'Llama2Tokenizer':\n assert args.tokenizer_model is not None\n tokenizer = _Llama2Tokenizer(args.tokenizer_model)\n elif args.tokenizer_type == 'NullTokenizer':\n assert args.vocab_size is not None\n tokenizer = _NullTokenizer(args.vocab_size)\n else:\n raise NotImplementedError('{} tokenizer is not '\n 'implemented.'.format(args.tokenizer_type))\n\n # Add vocab size (if not already set from a checkpoint).\n if getattr(args, \"padded_vocab_size\", None) is None:\n args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size,\n args)\n\n return tokenizer"
},
{
"identifier": "build_num_microbatches_calculator",
"path": "megatron/microbatches.py",
"snippet": "def build_num_microbatches_calculator(args):\n\n # Constant num micro-batches.\n if args.rampup_batch_size is None:\n num_microbatches_calculator = ConstantNumMicroBatches(\n args.global_batch_size, args.micro_batch_size,\n args.data_parallel_size)\n if args.rank == 0:\n print('setting number of micro-batches to constant {}'.format(\n num_microbatches_calculator.get()), flush=True)\n\n else:\n assert len(args.rampup_batch_size) == 3, 'expected the following ' \\\n 'format: --rampup-batch-size <start batch size> ' \\\n '<batch size incerement> <ramp-up samples>'\n start_batch_size = int(args.rampup_batch_size[0])\n batch_size_increment = int(args.rampup_batch_size[1])\n ramup_samples = int(args.rampup_batch_size[2])\n if args.rank == 0:\n print('will use batch size rampup starting from global batch '\n 'size {} to global batch size {} with batch size increments '\n '{} over {} samples.'.format(start_batch_size,\n args.global_batch_size,\n batch_size_increment,\n ramup_samples), flush=True)\n num_microbatches_calculator = RampupBatchsizeNumMicroBatches(\n start_batch_size, batch_size_increment, ramup_samples,\n args.global_batch_size, args.micro_batch_size,\n args.data_parallel_size)\n\n return num_microbatches_calculator"
},
{
"identifier": "Timers",
"path": "megatron/timers.py",
"snippet": "class Timers:\n \"\"\"Group of timers.\"\"\"\n\n def __init__(self, log_level, log_option):\n self._log_level = log_level\n self._log_option = log_option\n self._timers = {}\n self._log_levels = {}\n self._dummy_timer = DummyTimer()\n self._max_log_level = 2\n\n\n def __call__(self, name, log_level=None):\n # If the timer has already been set, then check if the log-level\n # is provided, it matches the one that the timer was created with.\n if name in self._timers:\n if log_level is not None:\n assert log_level == self._log_levels[name], \\\n 'input log level {} does not match already existing '\\\n 'log level {} for {} timer'.format(\n log_level, self._log_levels[name], name)\n return self._timers[name]\n # If timer does not exist and no log level is provided,\n # set it to the max log level which is 2.\n if log_level is None:\n log_level = self._max_log_level\n assert log_level <= self._max_log_level, \\\n 'log level {} is larger than max supported log level {}'.format(\n log_level, self._max_log_level)\n # Now if the input log level is larger than the one set for\n # the timers class, just ignore it and return a dummy timer.\n if log_level > self._log_level:\n return self._dummy_timer\n # Otherwise, initalize the timer and set the level.\n self._timers[name] = Timer(name)\n self._log_levels[name] = log_level\n return self._timers[name]\n\n\n def _get_elapsed_time_all_ranks(self, names, reset, barrier):\n \"\"\"\n Assumptions:\n - All the ranks call this function.\n - `names` are identical on all ranks.\n If the above assumptions are not met, calling this function will\n result in hang.\n Arguments:\n - names: list of timer names\n - reset: reset the timer after recording the elapsed time\n - barrier: if set, do a global barrier before time measurments\n \"\"\"\n\n # First make sure all the callers are in sync.\n if barrier:\n torch.distributed.barrier()\n\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n\n # Here we can use gather on the rank we want to print the\n # timing, however, there is no gather_base support in\n # pytorch yet. It is simpler to deal with a single tensor\n # and since we are only gathering a small amount of data,\n # it should be ok to use all-gather instead of gather.\n rank_name_to_time = torch.zeros((world_size, len(names)),\n dtype=torch.float,\n device=torch.cuda.current_device())\n for i, name in enumerate(names):\n if name in self._timers:\n # Here we don't need to pass the barrier flag as all\n # the processes are already in sync. 
This avoids the\n # issue of different timers having different barrier\n # groups inside their class.\n rank_name_to_time[rank, i] = self._timers[name].elapsed(\n reset=reset)\n\n # See the note above for why we are not using gather.\n torch.distributed._all_gather_base(rank_name_to_time.view(-1),\n rank_name_to_time[rank, :].view(-1))\n\n return rank_name_to_time\n\n\n def _get_global_min_max_time(self, names, reset, barrier, normalizer):\n \"\"\"Report only min and max times across all ranks.\"\"\"\n\n rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset,\n barrier)\n name_to_min_max_time = {}\n for i, name in enumerate(names):\n rank_to_time = rank_name_to_time[:, i]\n # filter out the ones we did not have any timings for\n rank_to_time = rank_to_time[rank_to_time > 0.0]\n # If the timer exists:\n if rank_to_time.numel() > 0:\n name_to_min_max_time[name] = (\n rank_to_time.min().item() / normalizer,\n rank_to_time.max().item() / normalizer)\n return name_to_min_max_time\n\n\n def _get_global_min_max_time_string(self, names, reset, barrier,\n normalizer, max_only):\n name_to_min_max_time = self._get_global_min_max_time(\n names, reset, barrier, normalizer)\n if not name_to_min_max_time:\n return None\n output_string = '(min, max) time across ranks (ms):'\n for name in name_to_min_max_time:\n min_time, max_time = name_to_min_max_time[name]\n if max_only:\n output_string += '\\n {}: {:.2f}'.format(\n (name+' ').ljust(48, '.'), max_time)\n else:\n output_string += '\\n {}: ({:.2f}, {:.2f})'.format(\n (name+' ').ljust(48, '.'), min_time, max_time)\n return output_string\n\n\n def _get_all_ranks_time_string(self, names, reset, barrier, normalizer):\n \"\"\"Report times across all ranks.\"\"\"\n rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset,\n barrier)\n\n output_string = 'times across ranks (ms):'\n no_reported_timing = True\n for i, name in enumerate(names):\n not_yet_found = True\n for rank in range(torch.distributed.get_world_size()):\n if rank_name_to_time[rank, i] > 0:\n no_reported_timing = False\n if not_yet_found:\n not_yet_found = False\n output_string += '\\n {}:'.format(name)\n output_string += '\\n rank {:2d}: {:.2f}'.format(\n rank, rank_name_to_time[rank, i] / normalizer)\n if no_reported_timing:\n return None\n return output_string\n\n\n def log(self, names, rank=None, normalizer=1.0, reset=True, barrier=False):\n \"\"\"Log a group of timers.\"\"\"\n\n # Print.\n assert normalizer > 0.0\n if self._log_option in ['max', 'minmax']:\n max_only = False\n if self._log_option == 'max':\n max_only = True\n output_string = self._get_global_min_max_time_string(\n names, reset, barrier, normalizer/1000.0, max_only)\n elif self._log_option == 'all':\n output_string = self._get_all_ranks_time_string(names,\n reset, barrier,\n normalizer/1000.0)\n else:\n raise Exception('unknown timing log option {}'.format(\n self._log_option))\n\n # If no input rank is provided, log on last rank.\n if rank is None:\n rank = torch.distributed.get_world_size() - 1\n if rank == torch.distributed.get_rank() and output_string is not None:\n print(output_string, flush=True)\n\n\n def write(self, names, writer, wandb, iteration, normalizer=1.0,\n reset=False, barrier=False):\n \"\"\"Write timers to a tensorboard writer\n Note that we only report maximum time across ranks to tensorboard.\n \"\"\"\n # currently when using add_scalars,\n # torch.utils.add_scalars makes each timer its own run, which\n # polutes the runs list, so we just add each as a scalar\n assert normalizer > 
0.0\n name_to_min_max_time = self._get_global_min_max_time(\n names, reset, barrier, normalizer)\n if writer is not None:\n for name in name_to_min_max_time:\n _, max_time = name_to_min_max_time[name]\n writer.add_scalar(name + '-time', max_time, iteration)\n if wandb is not None:\n wandb_log_dic = {}\n for name in name_to_min_max_time:\n _, max_time = name_to_min_max_time[name]\n wandb_log_dic[f'timer/{name}'] = max_time\n wandb.log(wandb_log_dic, iteration)"
}
] | import os
import sys
import torch
import wandb
from megatron import dist_signal_handler
from megatron.tokenizer import build_tokenizer
from .microbatches import build_num_microbatches_calculator
from .timers import Timers
from torch.utils.tensorboard import SummaryWriter
from userlib.auto_resume import AutoResume | 3,499 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Megatron global variables."""
_GLOBAL_ARGS = None
_GLOBAL_RETRO_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_WANDB_WRITER = None
_GLOBAL_ADLR_AUTORESUME = None
_GLOBAL_TIMERS = None
_GLOBAL_SIGNAL_HANDLER = None
def get_args():
"""Return arguments."""
_ensure_var_is_initialized(_GLOBAL_ARGS, 'args')
return _GLOBAL_ARGS
def get_retro_args():
"""Return retro arguments."""
return _GLOBAL_RETRO_ARGS
def get_num_microbatches():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples, consistency_check=True):
_GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples,
consistency_check)
def get_tokenizer():
"""Return tokenizer."""
_ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
return _GLOBAL_TOKENIZER
def get_tensorboard_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_TENSORBOARD_WRITER
def get_wandb_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_WANDB_WRITER
def get_adlr_autoresume():
"""ADLR autoresume object. It can be None so no need
to check if it is initialized."""
return _GLOBAL_ADLR_AUTORESUME
def get_timers():
"""Return timers."""
_ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
return _GLOBAL_TIMERS
def get_signal_handler():
_ensure_var_is_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler')
return _GLOBAL_SIGNAL_HANDLER
def _set_signal_handler():
global _GLOBAL_SIGNAL_HANDLER
_ensure_var_is_not_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler')
| # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Megatron global variables."""
_GLOBAL_ARGS = None
_GLOBAL_RETRO_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_WANDB_WRITER = None
_GLOBAL_ADLR_AUTORESUME = None
_GLOBAL_TIMERS = None
_GLOBAL_SIGNAL_HANDLER = None
def get_args():
"""Return arguments."""
_ensure_var_is_initialized(_GLOBAL_ARGS, 'args')
return _GLOBAL_ARGS
def get_retro_args():
"""Return retro arguments."""
return _GLOBAL_RETRO_ARGS
def get_num_microbatches():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples, consistency_check=True):
_GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples,
consistency_check)
def get_tokenizer():
"""Return tokenizer."""
_ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
return _GLOBAL_TOKENIZER
def get_tensorboard_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_TENSORBOARD_WRITER
def get_wandb_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_WANDB_WRITER
def get_adlr_autoresume():
"""ADLR autoresume object. It can be None so no need
to check if it is initialized."""
return _GLOBAL_ADLR_AUTORESUME
def get_timers():
"""Return timers."""
_ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
return _GLOBAL_TIMERS
def get_signal_handler():
_ensure_var_is_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler')
return _GLOBAL_SIGNAL_HANDLER
def _set_signal_handler():
global _GLOBAL_SIGNAL_HANDLER
_ensure_var_is_not_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler') | _GLOBAL_SIGNAL_HANDLER = dist_signal_handler.DistributedSignalHandler().__enter__() | 0 | 2023-12-07 08:29:38+00:00 | 4k |
mitrefireline/simharness | simharness2/rewards/base_reward.py | [
{
"identifier": "ReactiveAgent",
"path": "simharness2/agents/agent.py",
"snippet": "class ReactiveAgent:\n \"\"\"A simple agent that reacts to its environment.\n\n FIXME: update docstring style, using llama2 suggestion for now.\n Parameters\n ----------\n agent_id : int\n The unique ID of this agent.\n sim_id : int\n The unique ID of the simulation this agent belongs to.\n initial_position : tuple[int, int]\n The (x,y) starting position of the agent, where (0,0) is the top-left corner of\n the map and (max_x, max_y) is the bottom-right corner of the map.\n\n Properties\n ----------\n x : int\n The current X coordinate of the agent.\n y : int\n The current Y coordinate of the agent.\n row : int\n The current row number where the agent resides.\n col : int\n The current column number where the agent resides.\n latest_movement : str or None\n The last movement made by the agent, if applicable.\n latest_interaction : str or None\n The last interaction had by the agent, if applicable.\n mitigation_placed : bool\n Whether the agent has placed any mitigations recently.\n moved_off_map : bool\n Whether the agent has moved off the map recently.\n\n \"\"\"\n\n # NOTE: `agent_speed` ommitted, only used within `_do_one_simulation_step`\n # Attrs that should be specified on initialization\n agent_id: Any # ex: \"agent_0\", \"dozer_0\", \"handcrew_0\", \"ff_0\", etc.\n sim_id: int # should be contained within sim.agents.keys()\n initial_position: Tuple[int, int]\n\n # Attributes with default values\n latest_movement: int = None\n latest_interaction: int = None\n mitigation_placed: bool = False\n moved_off_map: bool = False\n\n def __post_init__(self):\n self._current_position = self.initial_position\n self.x, self.y = self.initial_position\n self.row, self.col = self.y, self.x\n\n @property\n def current_position(self) -> Tuple[int, int]:\n return self._current_position\n\n @current_position.setter\n def current_position(self, value: Tuple[int, int]):\n self._current_position = value\n self.x, self.y = value\n self.row, self.col = self.y, self.x\n\n @property\n def x(self) -> int:\n return self._current_position[0]\n\n @x.setter\n def x(self, value: int):\n self._current_position = (value, self.y)\n\n @property\n def y(self) -> int:\n return self._current_position[1]\n\n @y.setter\n def y(self, value: int):\n self._current_position = (self.x, value)\n\n @property\n def row(self) -> int:\n return self._current_position[1]\n\n @row.setter\n def row(self, value: int):\n self._current_position = (self.x, value)\n\n @property\n def col(self) -> int:\n return self._current_position[0]\n\n @col.setter\n def col(self, value: int):\n self._current_position = (value, self.y)\n\n def reset(self):\n self.latest_movement = None\n self.latest_interaction = None\n self.mitigation_placed = False\n self.moved_off_map = False\n self.__post_init__()\n # self.current_position = self.initial_position\n # self.reward = 0\n\n # def move(self, env: np.ndarray, direction: int) -> bool:\n # \"\"\"Moves the agent in the given direction if possible.\"\"\"\n # current_x, current_y = self.current_position\n # dx, dy = self.actions[direction]\n # next_x, next_y = current_x + dx, current_y + dy\n\n # if env[next_y][next_x] == \"_\":\n # self.current_position = (next_x, next_y)\n # return True\n # else:\n # return False"
},
{
"identifier": "ReactiveHarnessAnalytics",
"path": "simharness2/analytics/harness_analytics.py",
"snippet": "class ReactiveHarnessAnalytics(RLHarnessAnalytics):\n \"\"\"TODO Add description.\"\"\"\n\n def __init__(\n self,\n *,\n sim: FireSimulation,\n sim_analytics_partial: partial,\n agent_ids: set,\n benchmark_sim: FireSimulation = None,\n ) -> None:\n \"\"\"TODO Add summary line.\n\n Arguments:\n sim: The underlying `FireSimulation` object that contains the agent (s) that\n are being trained. The agent (s) will place mitigation lines, and the\n simulation will spread the fire. An episode terminates when the fire is\n finished spreading.\n sim_analytics_partial: A `functools.partial` object that defines the class\n that willbbe used to monitor and track `self.sim`, and\n `self.benchmark_sim`, if the optional `benchmark_sim` is provided. The\n user is expected to provide the `agent_analytics_partial` keyword\n argument, along with a valid value.\n agent_ids: TODO\n benchmark_sim: A separate `FireSimulation` object, identical to\n `sim` (after initialization). No mitigation lines will be placed in this\n simulation, as it does not contain any agent (s).\n\n Raises:\n TypeError: If `sim_analytics_partial.keywords` does not contain a\n `agent_analytics_partial` key with value of type `functools.partial`.\n\n \"\"\"\n # NOTE: Below is a hacky way to specify agent ids; Fix later\n # Inject `agent_ids` into keywords of `agent_analytics_partial`\n agent_partial: partial = sim_analytics_partial.keywords[\"agent_analytics_partial\"]\n agent_partial.keywords.update({\"agent_ids\": agent_ids})\n sim_analytics_partial.keywords[\"agent_analytics_partial\"] = agent_partial\n # Initialize sim_analytics object (s) and best_episode_performance attribute.\n super().__init__(\n sim=sim,\n sim_analytics_partial=sim_analytics_partial,\n benchmark_sim=benchmark_sim,\n )\n\n # Define attributes that are needed/accessed within `ComprehensiveReward` class.\n # TODO: Address where these attributes should be stored, see\n # https://gitlab.mitre.org/fireline/reinforcementlearning/simharness2/-/merge_requests/6#note_1504742\n\n if self.benchmark_sim_analytics:\n # track the existence of the benchmark sim to generate the comparative (ex. area saved or burn rate reduction) metrics\n self.sim_analytics.benchmark_exists = True\n\n # Track the latest episode reward\n # TODO is this the reward for the latest timestep or the latest episode?\n # FIXME: Decide how and where this attribute is/should be used.\n self.latest_reward = 0.0\n\n self.episodes_total = 0\n\n def update_after_one_agent_step(\n self,\n *,\n timestep: int,\n agents: Dict[Any, ReactiveAgent],\n ) -> None:\n \"\"\"Updates `self.sim_analytics.agent_analytics`, if agents are in the sim.\n\n This method is intended to be called directly after the call to\n `ReactiveHarness._do_one_agent_step()` (within `ReactiveHarness.step()`).\n\n Arguments:\n sim: The underlying `FireSimulation` object that contains the agent (s) that\n are being trained. The agent (s) will place mitigation lines, and the\n simulation will spread the fire. An episode terminates when the fire is\n finished spreading. 
(FIXME later)\n timestep: An integer indicating the current timestep of the episode.\n agents: TODO\n \"\"\"\n if self.sim_analytics.agent_analytics:\n self.sim_analytics.agent_analytics.update(timestep, agents)\n\n def update_after_one_simulation_step(self, *, timestep: int) -> None:\n \"\"\"Updates `self.sim_analytics` (and `self.benchmark_sim_analytics`, if exists).\n\n This method is intended to be called directly after the call to\n `ReactiveHarness._do_one_simulation_step()` (within `ReactiveHarness.step()`).\n\n Arguments:\n timestep: An integer indicating the current timestep of the episode.\n \"\"\"\n sim_area = self.sim_analytics.sim.fire_map.size\n\n if self.sim_analytics.benchmark_exists:\n # update the sim metrics with comparison metrics that use the benchmark sim in sim_analytics\n self.sim_analytics.update(\n timestep, benchmark_data=self.benchmark_sim_analytics.data.damaged\n )\n else:\n self.sim_analytics.update(timestep)\n\n def update_bench_after_one_simulation_step(self, *, timestep: int) -> None:\n \"\"\"Updates `self.benchmark_sim_analytics`, if exists.\n\n This method is intended to be called at the beginning of each episode in\n ReactiveHarness.\n\n Arguments:\n timestep: An integer indicating the current timestep of the episode.\n \"\"\"\n\n if self.benchmark_sim_analytics:\n self.benchmark_sim_analytics.update(timestep)\n\n return\n\n def update_after_one_harness_step(\n self, sim_run: bool, terminated: bool, reward: float, *, timestep: int\n ) -> None:\n \"\"\"Update the analytics after one step in the harness.\n\n Args:\n sim_run (bool): [description]\n terminated (bool): [description]\n reward (float): [description]\n timestep (int): [description]\n \"\"\"\n # Reset any attributes that monitor agent behavior between each simulation step.\n if sim_run and self.sim_analytics.agent_analytics:\n self.sim_analytics.agent_analytics.reset_after_one_simulation_step()\n\n # Once episode has terminated, check if episode performance is the best so far.\n if terminated:\n self.episodes_total += 1\n\n current_unburned = self.sim_analytics.data.unburned\n update_best_episode_performance = True\n if self.best_episode_performance:\n max_unburned = self.best_episode_performance.max_unburned\n if current_unburned <= max_unburned:\n update_best_episode_performance = False\n\n if update_best_episode_performance:\n self.best_episode_performance = BestEpisodePerformance(\n max_unburned=current_unburned,\n sim_area=self.sim_analytics.sim.fire_map.size,\n num_sim_steps=self.sim_analytics.num_sim_steps,\n episode=self.episodes_total,\n reward=reward,\n )\n perf = self.best_episode_performance\n logger.info(f\"Episode {self.episodes_total}: {perf}\")\n\n def reset(self, env_is_rendering: bool = False):\n \"\"\"Resets attributes that track data within each episode.\n\n This method is intended to be called within after the call to\n `ReactiveHarness._do_one_agent_step()` (within `ReactiveHarness.step()`).\n\n \"\"\"\n\n self.sim_analytics.reset(env_is_rendering)\n if self.benchmark_sim_analytics is not None:\n self.sim_analytics.benchmark_exists = True\n\n if self.benchmark_sim_analytics:\n self.benchmark_sim_analytics.reset(env_is_rendering)\n\n def save_sim_history(self, logdir: str, total_iters: int) -> None:\n \"\"\"TODO Add docstring.\"\"\"\n self.sim_analytics.data.save_episode_history(logdir, total_iters)\n\n if self.benchmark_sim_analytics:\n self.benchmark_sim_analytics.data.save_episode_history(logdir, total_iters)\n\n # def log_dfs(self):\n # \"\"\"Log the dataframes that are 
being tracked by the analytics.\"\"\"\n # logger.info(\"sim_analytics.df\")\n # logger.info(self.sim_analytics.df.to_markdown())\n # if self.benchmark_sim_analytics:\n # logger.info(\"benchmark_sim_analytics.df\")\n # logger.info(self.benchmark_sim_analytics.df.to_markdown())\n\n # if self.sim_analytics.agent_analytics:\n # logger.info(\"sim_analytics.agent_analytics.df\")\n # logger.info(self.sim_analytics.agent_analytics.df.to_markdown())"
}
] | import logging
from abc import ABC, abstractmethod
from typing import Any, Dict
from simharness2.agents.agent import ReactiveAgent
from simharness2.analytics.harness_analytics import ReactiveHarnessAnalytics | 3,086 | """Base Reward Class for representing the modular reward function.
Reward Classes to be called in the main environment that derive rewards from the
ReactiveHarnessAnalytics object.
"""
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
)
logger.addHandler(handler)
logger.propagate = False
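# Module-level logger: records go through the stream handler configured above and are not propagated to the root logger.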
class BaseReward(ABC):
"""Abstract Class for Reward_Class template with the update functions implemented."""
| """Base Reward Class for representing the modular reward function.
Reward Classes to be called in the main environment that derive rewards from the
ReactiveHarnessAnalytics object.
"""
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
)
logger.addHandler(handler)
logger.propagate = False
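# Module-level logger: records go through the stream handler configured above and are not propagated to the root logger.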
class BaseReward(ABC):
"""Abstract Class for Reward_Class template with the update functions implemented."""
| def __init__(self, harness_analytics: ReactiveHarnessAnalytics): | 1 | 2023-12-08 19:13:31+00:00 | 4k |
racinette/querky | querky/backends/postgresql/name_type_mapper.py | [
{
"identifier": "TypeKnowledge",
"path": "querky/base_types.py",
"snippet": "class TypeKnowledge(GetImportsMixin):\n metadata: TypeMetaData\n is_array: bool\n is_optional: bool | None\n elem_is_optional: bool | None = None\n typehint: str | None = None\n userhint: typing.Any | None = None\n required_imports: set[str] | None = None\n\n def __post_init__(self):\n self.set_userhint(self.userhint)\n\n def set_userhint(self, userhint: typing.Any):\n if userhint is None or userhint is inspect._empty:\n # пользователь не предоставил аннотацию\n return\n if userhint == typing.Optional:\n # пользователь явно указал, что этот аргумент опционален\n self.is_optional = True\n elif isinstance(userhint, str):\n # пользователь явно указал аннотацию - мы будем использовать ее прямо в таком же виде, как указано\n self.userhint = userhint\n self.typehint = self.userhint\n else:\n raise NotImplementedError(\n \"Type annotation is a live object.\\n\"\n \"It is impossible to copy safely between files.\\n\"\n \"Placing it between parenthesis, thus making it a raw string, should do the trick.\\n\"\n \"If you need to import something inside the generated file for this annotation to work, \"\n \"use `__imports__ = [<your imports as raw strings>]` in your source file.\"\n )\n\n def get_imports(self) -> set[str]:\n s = self.metadata.get_imports()\n if self.required_imports is not None:\n s.update(self.required_imports)\n return s\n\n def add_import(self, s: str) -> None:\n if self.required_imports is None:\n self.required_imports = set()\n self.required_imports.add(s)"
},
{
"identifier": "TypeMetaData",
"path": "querky/base_types.py",
"snippet": "class TypeMetaData(GetImportsMixin):\n counterpart: str\n required_imports: set[str] | None = None\n\n def get_imports(self) -> set[str]:\n if self.required_imports is None:\n return set()\n return set(self.required_imports)\n\n @classmethod\n def from_type(cls, t: typing.Type) -> TypeMetaData:\n type_name = t.__name__\n module_path = t.__module__\n return TypeMetaData(\n counterpart=type_name,\n required_imports={f\"from {module_path} import {type_name}\"}\n )"
},
{
"identifier": "Contract",
"path": "querky/contract.py",
"snippet": "class Contract(ABC):\n @abstractmethod\n def create_param_mapper(self, query: Query) -> ParamMapper:\n ...\n\n @abstractmethod\n def get_default_record_type_metadata(self) -> TypeMetaData:\n ...\n\n @abstractmethod\n def get_connection_type_metadata(self) -> TypeMetaData:\n ...\n\n @abstractmethod\n async def get_query_signature(self, db, query: Query) -> QuerySignature:\n ...\n\n @abstractmethod\n def get_query_signature_sync(self, db, query: Query) -> QuerySignature:\n ...\n\n @abstractmethod\n def is_async(self) -> bool:\n ...\n\n @abstractmethod\n async def fetch_value(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_one(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_all(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_column(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_status(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def raw_execute(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchval(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchone(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetch(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def fetch_value_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_one_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_all_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_column_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_status_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def raw_execute_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchval_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def raw_fetchone_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def raw_fetch_sync(self, conn, sql: str, params):\n ..."
},
{
"identifier": "PostgresqlTypeMapper",
"path": "querky/backends/postgresql/type_mapper.py",
"snippet": "class PostgresqlTypeMapper(ABC):\n @abstractmethod\n async def get_type_knowledge(self, contract: Contract, conn, oid: int) -> TypeKnowledge:\n ...\n\n @abstractmethod\n def get_type_knowledge_sync(self, contract: Contract, conn, oid: int) -> TypeKnowledge:\n ..."
}
] | from querky.base_types import TypeKnowledge, TypeMetaData
from querky.contract import Contract
from querky.backends.postgresql.type_mapper import PostgresqlTypeMapper | 1,747 |
GET_PG_TYPE_SQL_QUERY = """
SELECT
oid::regtype::TEXT AS type_string,
typnamespace::regnamespace::TEXT AS namespace_string
FROM
pg_type
WHERE
oid = $1
"""
class PostgresqlNameTypeMapper(PostgresqlTypeMapper):
def __init__(self, typemap: dict[str, dict[str, TypeMetaData]]):
self.type_cache = dict()
        # copy the mapping
self.typemap = {
schema_name: {
type_name: type_metadata
for type_name, type_metadata in schema_map.items()
}
for schema_name, schema_map in typemap.items()
}
def set_mapping(self, schema: str, type_name: str, metadata: TypeMetaData) -> None:
if schema not in self.typemap:
self.typemap[schema] = dict()
s = self.typemap[schema]
s[type_name] = metadata
async def get_pg_type(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = await contract.raw_fetchone(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
            self.type_cache[oid] = pg_type
return pg_type
def get_pg_type_sync(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = contract.raw_fetchone_sync(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
            self.type_cache[oid] = pg_type
return pg_type
|
GET_PG_TYPE_SQL_QUERY = """
SELECT
oid::regtype::TEXT AS type_string,
typnamespace::regnamespace::TEXT AS namespace_string
FROM
pg_type
WHERE
oid = $1
"""
class PostgresqlNameTypeMapper(PostgresqlTypeMapper):
def __init__(self, typemap: dict[str, dict[str, TypeMetaData]]):
self.type_cache = dict()
        # copy the mapping
self.typemap = {
schema_name: {
type_name: type_metadata
for type_name, type_metadata in schema_map.items()
}
for schema_name, schema_map in typemap.items()
}
def set_mapping(self, schema: str, type_name: str, metadata: TypeMetaData) -> None:
if schema not in self.typemap:
self.typemap[schema] = dict()
s = self.typemap[schema]
s[type_name] = metadata
async def get_pg_type(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = await contract.raw_fetchone(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
            self.type_cache[oid] = pg_type
return pg_type
def get_pg_type_sync(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = contract.raw_fetchone_sync(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
            self.type_cache[oid] = pg_type
return pg_type
| def get_type_knowledge_impl(self, pg_type) -> TypeKnowledge: | 0 | 2023-12-13 15:16:34+00:00 | 4k |
RokasEl/mace-mp-umap | mace_mp_umap/cli.py | [
{
"identifier": "find_closest_training_points",
"path": "mace_mp_umap/analysis.py",
"snippet": "def find_closest_training_points(training_df, test_df):\n structure_groups = test_df.groupby(\"structure_index\")\n training_descriptors = np.vstack(\n training_df[\"descriptor\"]\n ) # num_many_atoms x num_features\n training_norms = np.linalg.norm(training_descriptors, axis=1)\n mp_ids = training_df[\"mp_id\"].values\n unique_mp_ids = np.unique(mp_ids)\n results = []\n print(\"Finding closest training points\")\n for structure_index, structure_df in tqdm(structure_groups):\n structure_descriptors = np.vstack(\n structure_df[\"descriptor\"]\n ) # num_atoms x num_features\n structure_similarities = np.dot(\n structure_descriptors, training_descriptors.T\n ) # num_atoms x num_training_atoms\n structure_similarities /= np.linalg.norm(structure_descriptors, axis=1)[:, None]\n structure_similarities /= training_norms[None, :]\n elements = np.unique(structure_df[\"element\"].values)\n for mp_id in unique_mp_ids:\n mp_id_mask = mp_ids == mp_id\n mp_id_similarities = structure_similarities[:, mp_id_mask]\n mp_id_similarities = np.max(mp_id_similarities, axis=1)\n per_element_average_similarities = [\n np.mean(mp_id_similarities[structure_df[\"element\"] == x])\n for x in elements\n ]\n per_element_results = dict(zip(elements, per_element_average_similarities))\n results.append(\n {\n \"structure_index\": structure_index,\n \"mp_id\": mp_id,\n \"average_similarity\": np.mean(mp_id_similarities),\n \"element_stratified_average_similarity\": np.mean(\n per_element_average_similarities\n ),\n }\n | per_element_results\n ) # type: ignore\n return pd.DataFrame(results).sort_values(\n by=[\"structure_index\", \"element_stratified_average_similarity\"],\n ascending=[True, False],\n )"
},
{
"identifier": "write_chemiscope_input",
"path": "mace_mp_umap/chemiscope_handling.py",
"snippet": "def write_chemiscope_input(train_atoms, test_atoms, reducers, system_name):\n all_atoms = train_atoms + test_atoms\n write_id_match_csv(train_atoms, test_atoms, system_name)\n (descriptors, symbols, groups, periods, neighbours) = get_atomic_properties(\n all_atoms\n )\n pca, umap_emb = get_reduced_embeddings(reducers, descriptors)\n train_test_split = get_train_test_split(train_atoms, test_atoms)\n properties = [\n create_property(\"0_UMAP\", umap_emb, \"UMAP Embeddings\"),\n create_property(\"PCA\", pca, \"PCA Embeddings\"),\n create_property(\"TrainTest\", train_test_split, \"Train/Test split\"),\n create_property(\"element\", symbols, \"Atomic element\"),\n create_property(\"group\", groups, \"Group\"),\n create_property(\"period\", periods, \"Period\"),\n ]\n if neighbours is not None:\n properties.append(\n create_property(\"num_neighbours\", neighbours, \"Number of neighbours\")\n )\n properties = {k: v for d in properties for k, v in d.items()}\n # define better default settings for the viewer\n settings = {\n \"map\": {\"color\": {\"property\": \"TrainTest\"}, \"palette\": \"brg\"},\n \"structure\": [{\"atomLabels\": True}],\n }\n chemiscope.write_input(\n path=f\"{system_name}_chemiscope_input.json\",\n frames=all_atoms,\n properties=properties,\n settings=settings,\n # This is required to display properties with `target: \"atom\"`\n environments=chemiscope.all_atomic_environments(all_atoms),\n )"
},
{
"identifier": "get_cleaned_dataframe",
"path": "mace_mp_umap/data_manipulations.py",
"snippet": "def get_cleaned_dataframe(\n path: str,\n calc: MACECalculator,\n element_subset: list[str],\n element_cutoffs: dict,\n filtering_type: str,\n):\n data = aio.read(path, index=\":\", format=\"extxyz\")\n print(f\"Loaded {len(data)} structures\")\n filtered_data = list(\n filter(lambda x: filter_atoms(x, element_subset, filtering_type), tqdm(data))\n )\n print(f\"Filtered to {len(filtered_data)} structures\")\n calculate_descriptors(filtered_data, calc, element_cutoffs)\n df = convert_to_dataframe(filtered_data)\n df = remove_non_unique_environments(df)\n return filtered_data, df"
},
{
"identifier": "apply_dimensionality_reduction",
"path": "mace_mp_umap/dim_reduction.py",
"snippet": "def apply_dimensionality_reduction(\n df: pd.DataFrame, tag: str, feature_slice: slice, umap_reducer, pca_reducer\n) -> None:\n descriptors = np.vstack(df[\"descriptor\"])[:, feature_slice]\n embeddings = umap_reducer.transform(descriptors)\n embeddings_pca = pca_reducer.transform(descriptors)\n df[f\"{tag}_umap_1\"] = embeddings[:, 0]\n df[f\"{tag}_umap_2\"] = embeddings[:, 1]\n df[f\"{tag}_pca_1\"] = embeddings_pca[:, 0]\n df[f\"{tag}_pca_2\"] = embeddings_pca[:, 1]"
},
{
"identifier": "fit_dimensionality_reduction",
"path": "mace_mp_umap/dim_reduction.py",
"snippet": "def fit_dimensionality_reduction(\n df: pd.DataFrame, tag: str, feature_slice: slice, random_state: int = 42\n):\n umap_reducer, pca_reducer = get_reducers(random_state)\n descriptors = np.vstack(df[\"descriptor\"])[:, feature_slice]\n embeddings = umap_reducer.fit_transform(descriptors)\n embeddings_pca = pca_reducer.fit_transform(descriptors)\n df[f\"{tag}_umap_1\"] = embeddings[:, 0]\n df[f\"{tag}_umap_2\"] = embeddings[:, 1]\n df[f\"{tag}_pca_1\"] = embeddings_pca[:, 0]\n df[f\"{tag}_pca_2\"] = embeddings_pca[:, 1]\n return umap_reducer, pca_reducer"
},
{
"identifier": "plot_dimensionality_reduction",
"path": "mace_mp_umap/plotting.py",
"snippet": "def plot_dimensionality_reduction(\n training_data_df: pd.DataFrame, test_data_df: pd.DataFrame, num_layers: int\n) -> plt.Figure:\n fig, axes = plt.subplots(num_layers, 2, figsize=(12, 8 * num_layers))\n colors, norm = get_colors_for_training_data(training_data_df, test_data_df)\n if norm is not None:\n cbar_ax = fig.add_axes([0.2, 0.95, 0.6, 0.02])\n sm = plt.cm.ScalarMappable(cmap=\"cividis\", norm=norm)\n sm.set_array([])\n fig.colorbar(sm, cax=cbar_ax, orientation=\"horizontal\")\n cbar_ax.set_title(\"num_neighbours\", size=10, loc=\"left\")\n\n for i in range(num_layers):\n tag = f\"layer_{i}\"\n ax = axes[i]\n ax[0].scatter(\n training_data_df[f\"{tag}_umap_1\"],\n training_data_df[f\"{tag}_umap_2\"],\n s=30,\n alpha=0.8,\n c=colors,\n rasterized=True,\n )\n ax[1].scatter(\n training_data_df[f\"{tag}_pca_1\"],\n training_data_df[f\"{tag}_pca_2\"],\n s=30,\n alpha=0.8,\n c=colors,\n rasterized=True,\n )\n ax[0].set_title(f\"UMAP {tag}\")\n ax[1].set_title(f\"PCA {tag}\")\n\n if test_data_df is not None:\n for i in range(num_layers):\n tag = f\"layer_{i}\"\n ax = axes[i]\n ax[0].scatter(\n test_data_df[f\"{tag}_umap_1\"],\n test_data_df[f\"{tag}_umap_2\"],\n s=30,\n alpha=0.8,\n c=\"none\",\n edgecolors=\"red\",\n linewidths=2,\n rasterized=True,\n )\n ax[1].scatter(\n test_data_df[f\"{tag}_pca_1\"],\n test_data_df[f\"{tag}_pca_2\"],\n s=30,\n alpha=0.8,\n c=\"none\",\n edgecolors=\"red\",\n linewidths=2,\n rasterized=True,\n )\n\n return fig"
},
{
"identifier": "get_layer_specific_feature_slices",
"path": "mace_mp_umap/utils.py",
"snippet": "def get_layer_specific_feature_slices(calc: MACECalculator) -> list[slice]:\n num_layers = calc.models[0].num_interactions\n irreps_out = calc.models[0].products[0].linear.__dict__[\"irreps_out\"]\n l_max = irreps_out.lmax\n features_per_layer = irreps_out.dim // (l_max + 1) ** 2\n slices = [slice(0, (i + 1) * features_per_layer) for i in range(num_layers)]\n return slices"
}
] | import pathlib
import typing as t
import typer
import warnings
import torch
from collections import defaultdict
from mace.calculators import mace_mp
from typing_extensions import Annotated
from enum import Enum
from .analysis import find_closest_training_points
from .chemiscope_handling import write_chemiscope_input
from .data_manipulations import get_cleaned_dataframe
from .dim_reduction import (
apply_dimensionality_reduction,
fit_dimensionality_reduction,
)
from .plotting import plot_dimensionality_reduction
from .utils import get_layer_specific_feature_slices | 2,742 |
app = typer.Typer()
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class FilterType(str, Enum):
exclusive = "exclusive"
inclusive = "inclusive"
combinations = "combinations"
none = "none"
@app.command()
def produce_mace_chemiscope_input(
data_path: str = typer.Argument(
default=None,
help="Path to XYZ file containing your system",
),
mp_data_path: str = typer.Argument(default=None, help="Path to MP data"),
filtering: FilterType = typer.Option(
default=FilterType.none,
case_sensitive=False,
help="Whether to filter out structures that contain elements not in the subset or to include them.",
),
element_subset: Annotated[
t.List[str],
typer.Option(
"--add-element", "-e", help="List of elements to include in the subset."
),
] = [],
create_plots: bool = typer.Option(
default=False, help="Whether to create static UMAP and PCA plots."
),
):
if DEVICE != "cuda":
warnings.warn("CUDA not available, using CPU. Might be slow.")
if filtering == FilterType.none:
raise ValueError(
"You must specify filtering type (either `--filtering exclusive` or `--filtering inclusive`).\n"
"Combinations mode means that structures are kept if they're composed only of elements supplied via `-e` flags but don't need to contail all of the supplied elements.\n"
"Exclusive mode means those and only those structures are kept that contail all elements supplied via `-e` flags. This is a subset of `combinations`\n"
"Inclusive mode means that other elements are allowed in addition to those supplied via `-e` flags.\n"
"Most applications should use `--filtering inclusive`. However, for elemental compounds or molecular compounds like water `exclusive` or `combinations` modes are more appropriate."
)
# Load model
calc = mace_mp(
model="medium",
device=DEVICE,
default_dtype="float64",
)
print(
f"Using the MACE cutoff ({calc.r_max} A) for neighbour analysis for all elements."
)
cutoff_dict = defaultdict(lambda: calc.r_max)
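    # Every element defaults to the MACE model's cutoff radius (calc.r_max) for the neighbour analysis.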
# Load MP data
|
app = typer.Typer()
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class FilterType(str, Enum):
exclusive = "exclusive"
inclusive = "inclusive"
combinations = "combinations"
none = "none"
@app.command()
def produce_mace_chemiscope_input(
data_path: str = typer.Argument(
default=None,
help="Path to XYZ file containing your system",
),
mp_data_path: str = typer.Argument(default=None, help="Path to MP data"),
filtering: FilterType = typer.Option(
default=FilterType.none,
case_sensitive=False,
help="Whether to filter out structures that contain elements not in the subset or to include them.",
),
element_subset: Annotated[
t.List[str],
typer.Option(
"--add-element", "-e", help="List of elements to include in the subset."
),
] = [],
create_plots: bool = typer.Option(
default=False, help="Whether to create static UMAP and PCA plots."
),
):
if DEVICE != "cuda":
warnings.warn("CUDA not available, using CPU. Might be slow.")
if filtering == FilterType.none:
raise ValueError(
"You must specify filtering type (either `--filtering exclusive` or `--filtering inclusive`).\n"
"Combinations mode means that structures are kept if they're composed only of elements supplied via `-e` flags but don't need to contail all of the supplied elements.\n"
"Exclusive mode means those and only those structures are kept that contail all elements supplied via `-e` flags. This is a subset of `combinations`\n"
"Inclusive mode means that other elements are allowed in addition to those supplied via `-e` flags.\n"
"Most applications should use `--filtering inclusive`. However, for elemental compounds or molecular compounds like water `exclusive` or `combinations` modes are more appropriate."
)
# Load model
calc = mace_mp(
model="medium",
device=DEVICE,
default_dtype="float64",
)
print(
f"Using the MACE cutoff ({calc.r_max} A) for neighbour analysis for all elements."
)
cutoff_dict = defaultdict(lambda: calc.r_max)
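    # Every element defaults to the MACE model's cutoff radius (calc.r_max) for the neighbour analysis.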
# Load MP data | train_atoms, training_data_df = get_cleaned_dataframe( | 2 | 2023-12-09 10:08:26+00:00 | 4k |
Shahzadnit/EZ-CLIP | clip/clip.py | [
{
"identifier": "build_model",
"path": "clip/model.py",
"snippet": "def build_model(state_dict: dict, config, tsm=False,T=8,dropout=0., joint=False,emb_dropout=0.,pretrain=True):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len([k for k in state_dict.keys() if k.startswith(\"visual.\") and k.endswith(\".attn.in_proj_weight\")])\n vision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n grid_size = round((state_dict[\"visual.positional_embedding\"].shape[0] - 1) ** 0.5)\n image_resolution = vision_patch_size * grid_size\n else:\n counts: list = [len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"visual.layer{b}\"))) for b in [1, 2, 3, 4]]\n vision_layers = tuple(counts)\n \n vision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n output_width = round((state_dict[\"visual.attnpool.positional_embedding\"].shape[0] - 1) ** 0.5)\n vision_patch_size = None\n assert output_width ** 2 + 1 == state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n image_resolution = output_width * 32\n\n embed_dim = state_dict[\"text_projection\"].shape[1]\n context_length = state_dict[\"positional_embedding\"].shape[0]\n vocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = state_dict[\"ln_final.weight\"].shape[0]\n transformer_heads = transformer_width // 64\n transformer_layers = len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"transformer.resblocks\")))\n \n model = CLIP(\n embed_dim,config,\n image_resolution, vision_layers, vision_width, vision_patch_size,T,\n context_length, vocab_size, transformer_width, transformer_heads, transformer_layers, tsm=tsm,T=T,joint=joint,\n dropout=dropout, emb_dropout=emb_dropout\n )\n\n for key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n if key in state_dict:\n del state_dict[key]\n if tsm:\n for k in list(state_dict.keys()):\n if k.find(\"conv1\")>-1 and k.find(\"layer\")>-1: \n n_k = k.split('conv1.')[0]+'conv1.net.'+k.split('conv1.')[1]\n state_dict[n_k] = state_dict.pop(k)\n if k.find(\"resblocks\")>-1 and k.find(\"visual\")>-1: \n tmp = ''\n for i, t_ in enumerate(k.split('resblocks.')[1].split('.')):\n if i>=1:\n tmp += '.' + t_ \n \n n_k = k.split('resblocks.')[0]+'resblocks.' + k.split('resblocks.')[1].split('.')[0]+'.net'+ tmp\n# print(n_k)\n state_dict[n_k] = state_dict.pop(k)\n\n convert_weights(model)\n if pretrain:\n print('loading clip pretrained model!')\n if joint: #or emb_dropout>0 or dropout>0\n model.load_state_dict(state_dict,strict=False)\n else:\n model.load_state_dict(state_dict, strict=False)\n else:\n print('not using full clip pretrained model, only visual!')\n \n for k in list(state_dict.keys()):\n if not k.find(\"visual\")>-1: \n state_dict.pop(k)\n\n model.load_state_dict(state_dict,strict=False)\n\n return model.eval()"
},
{
"identifier": "SimpleTokenizer",
"path": "clip/simple_tokenizer.py",
"snippet": "class SimpleTokenizer(object):\n def __init__(self, bpe_path: str = default_bpe()):\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split('\\n')\n merges = merges[1:49152-256-2+1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v+'</w>' for v in vocab]\n for merge in merges:\n vocab.append(''.join(merge))\n vocab.extend(['<|startoftext|>', '<|endoftext|>'])\n self.encoder = dict(zip(vocab, range(len(vocab))))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}\n self.pat = re.compile(r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\", re.IGNORECASE)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + ( token[-1] + '</w>',)\n pairs = get_pairs(word)\n\n if not pairs:\n return token+'</w>'\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))\n bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def decode(self, tokens):\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=\"replace\").replace('</w>', ' ')\n return text"
}
] | import hashlib
import os
import urllib
import warnings
import torch
from typing import Union, List
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer | 2,794 |
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
# "ViT-E/16": "https://huggingface.co/QuanSun/EVA-CLIP/resolve/main/EVA02_CLIP_B_psz16_s8B.pt?download=true"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
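    # The expected SHA256 digest is embedded in the download URL as its second-to-last path component.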
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform(n_px):
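    # CLIP image preprocessing: bicubic resize, center crop, RGB conversion, tensor conversion, and CLIP's channel-wise normalization.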
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, config, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True, tsm=False, joint=False,T=8,dropout=0., emb_dropout=0.,pretrain=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
|
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
# "ViT-E/16": "https://huggingface.co/QuanSun/EVA-CLIP/resolve/main/EVA02_CLIP_B_psz16_s8B.pt?download=true"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
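    # The expected SHA256 digest is embedded in the download URL as its second-to-last path component.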
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform(n_px):
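    # CLIP image preprocessing: bicubic resize, center crop, RGB conversion, tensor conversion, and CLIP's channel-wise normalization.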
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, config, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True, tsm=False, joint=False,T=8,dropout=0., emb_dropout=0.,pretrain=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
| model = build_model(state_dict or model.state_dict(),config, joint=joint,tsm=tsm,T=T,dropout=dropout, emb_dropout=emb_dropout,pretrain=pretrain).to(device) | 0 | 2023-12-12 13:11:20+00:00 | 4k |
javrtg/C2P | nonmin_pose/models/base.py | [
{
"identifier": "ConstraintConfig",
"path": "nonmin_pose/constraints/constraint_manager.py",
"snippet": "class ConstraintManager:\n CONSTRAINT_CLASSES = {\n \"manif_def_left\": cnt.ManifDefLeft,\n \"manif_def_right\": cnt.ManifDefRight,\n \"norm_t\": cnt.NormT,\n \"norm_q\": cnt.NormQ,\n \"e_def_left\": cnt.EDefLeft,\n \"e_def_right\": cnt.EDefRight,\n \"e_def_left_right\": cnt.EDefLeftRight,\n \"homogenization\": cnt.Homogenization,\n \"adjoint\": cnt.Adjoint,\n \"norm_e\": cnt.NormE,\n \"right_null_space\": cnt.RightNullSpace,\n \"left_null_space\": cnt.LeftNullSpace,\n \"cheirality_translation\": cnt.CheiralityTranslation,\n \"cheirality_translation_v2\": cnt.CheiralityTranslationV2,\n \"cheirality_rotation\": cnt.CheiralityRotation,\n \"cheirality_rotation_q\": cnt.CheiralityRotationQ,\n \"cheirality_midpoint\": cnt.CheiralityMidpoint,\n \"orthogonality\": cnt.Orthogonality,\n \"determinant_r\": cnt.DeterminantR,\n \"t_q_definition\": cnt.TQDefinition,\n \"skew_t_q_definition\": cnt.SkewTQDefinition,\n \"convex_hull_so3\": cnt.ConvexHullSO3,\n }\n DYNAMIC_CONSTRAINTS = {\n \"cheirality_translation\",\n \"cheirality_translation_v2\",\n \"cheirality_rotation\",\n \"cheirality_rotation_q\",\n \"cheirality_midpoint\",\n }\n def __init__(\n self,\n parameters: List[Parameter],\n constraints: ConstraintConfig,\n use_top_k: Optional[int] = None,\n ) -> None:\n def compute_dynamic_coeffs(\n self, f0: np.ndarray, f1: np.ndarray, inliers_conf: Optional[np.ndarray] = None\n ):\n def available_constraints(self) -> List[str]:\ndef check_params_and_get_blocks(params: List[Parameter]) -> Dict[int, int]:"
},
{
"identifier": "Parameter",
"path": "nonmin_pose/constraints/constraints.py",
"snippet": "class Parameter:\n \"\"\"Class for defining a parameter.\n\n Attributes:\n name: e.g. E, R, t, etc. This MUST match the name being used on the constraints.\n block: 1-based index of the block.\n block_ids: 1-based index of each parameter element in the block.\n \"\"\"\n\n __slots__ = (\"name\", \"block\", \"block_ids\")\n\n def __init__(self, name: str, block: int, block_ids: List[int]):\n assert block > 0, \"block must be positive\"\n assert all(idx > 0 for idx in block_ids), \"block_id must be positive\"\n\n self.name = name\n self.block = block\n self.block_ids = block_ids"
},
{
"identifier": "compute_data_matrix_C",
"path": "nonmin_pose/utils.py",
"snippet": "def compute_data_matrix_C(\n f0: np.ndarray, f1: np.ndarray, w: Optional[np.ndarray] = None\n) -> np.ndarray:\n \"\"\"Compute the data matrix C from the bearing vectors.\n\n Args:\n f0: (3, n) bearing vectors in camera 0.\n f1: (3, n) bearing vectors in camera 1.\n w: (n,) array of weights for each epipolar residual. (default: None)\n\n Returns:\n C: (9, 9) data matrix.\n \"\"\"\n assert w is None or w.ndim == 1, \"w must be a 1D array.\"\n\n n = f0.shape[1]\n f0_kron_f1 = (n**-0.5) * (f0[:, None] * f1).reshape(9, n)\n\n if w is None:\n return f0_kron_f1 @ f0_kron_f1.T\n return w * f0_kron_f1 @ f0_kron_f1.T"
},
{
"identifier": "decompose_essmat",
"path": "nonmin_pose/utils.py",
"snippet": "def decompose_essmat(\n U: np.ndarray,\n Vt: np.ndarray,\n f0: np.ndarray,\n f1: np.ndarray,\n th_pure_rotation: float = 1 - 1e-8,\n) -> Tuple[np.ndarray, np.ndarray, bool]:\n \"\"\"Decompose the essential matrix into relative rotation and (normalized)\n translation.\n\n The extraction of the 4 possible relative pose factorizations given an essential\n matrix, follows the approach explained in [1, Sec. 9.6.2].\n To select the best pose candidate, we check the sign of the factor that\n multiplies each bearing vector. This factor must be positive since a bearing\n vector is equivalent to $f_i := X_i / ||X_i||$, where $X_i$ is the corresponding\n 3D point. Thus, to recover the 3D point, we multiply $f$ with an estimated\n scalar factor that *must* be positive. This constraint is independent of the\n camera model used (pinhole, fisheye, omnidirectional etc.), thus the camera\n model is not a limiting factor for this approach.\n To compute the scalar factor (the norm of X_i), we use the classic midpoint\n method (see e.g. [2]). However, instead of explicitly computing (triangulating)\n the 3D points, we just compute the sign of the scalar factors (lambdas). As a\n result, we save some computation. Specifically, we avoid computing:\n 1) the term (sin angle(f0, R01@f1))^2 = || f0 x R01@f1 ||^2, for each point, and\n 2) the XY coordinates of each 3D point.\n\n [1]: Multiple View Geometry in Computer Vision, Hartley and Zisserman, 2003.\n [2]: Triangulation: why optimize?, Lee and Civera, 2019.\n\n Args:\n U: (3, 3) left singular vectors of the essential matrix.\n Vt: (3, 3) right singular vectors of the essential matrix.\n f0: (3, n) bearing vectors in camera 0.\n f1: (3, n) bearing vectors in camera 1.\n th_pure_rotation: threshold for checking if the motion is a pure rotation.\n\n Returns:\n R: (3, 3) rotation matrix.\n t: (3, 1) translation vector.\n is_pure_rotation: True if a (near-)pure rotation is detected.\n \"\"\"\n # avoid reflection (ensure rotation) when decomposing the essential matrix.\n Vt[2] = -Vt[2] if np.linalg.det(U) * np.linalg.det(Vt) < 0 else Vt[2]\n\n Ra, Rb = U @ _W @ Vt # (2, 3, 3)\n ta, tb = U[:, 2:], -U[:, 2:]\n\n # check if it is a pure rotation.\n is_pure_rotation, choice = check_pure_rotation(\n f0, np.stack((Ra, Rb)) @ f1, th_pure_rotation\n )\n\n # (Ra, ta)\n Raf1 = Ra @ f1\n lambda0_rhs = (\n np.cross((Raf1).T, f0.T)[:, None] @ np.cross((Raf1).T, ta.T)[..., None]\n )\n lambda1_rhs = np.cross((Raf1).T, f0.T)[:, None] @ np.cross(f0.T, ta.T)[..., None]\n npos_aa = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()\n\n # (Rb, ta)\n Rbf1 = Rb @ f1\n lambda0_rhs = (\n np.cross((Rbf1).T, f0.T)[:, None] @ np.cross((Rbf1).T, ta.T)[..., None]\n )\n lambda1_rhs = np.cross((Rbf1).T, f0.T)[:, None] @ np.cross(f0.T, ta.T)[..., None]\n npos_ba = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()\n\n # (Ra, tb)\n lambda0_rhs = (\n np.cross((Raf1).T, f0.T)[:, None] @ np.cross((Raf1).T, tb.T)[..., None]\n )\n lambda1_rhs = np.cross((Raf1).T, f0.T)[:, None] @ np.cross(f0.T, tb.T)[..., None]\n npos_ab = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()\n\n # (Rb, tb)\n lambda0_rhs = (\n np.cross((Rbf1).T, f0.T)[:, None] @ np.cross((Rbf1).T, tb.T)[..., None]\n )\n lambda1_rhs = np.cross((Rbf1).T, f0.T)[:, None] @ np.cross(f0.T, tb.T)[..., None]\n npos_bb = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()\n\n npos_tpos = np.r_[npos_aa, npos_ba]\n npos_tneg = np.r_[npos_ab, npos_bb]\n\n if is_pure_rotation and (npos_tpos[choice] == npos_tneg[choice] == 0):\n # Pure rotation with perfect 
bearings alignment by just rotating them.\n R01 = Ra if choice == 0 else Rb\n return R01, ta, is_pure_rotation\n\n if is_pure_rotation:\n # Pure rotation with imperfect bearings alignment. Choose the translation\n # candidate that satisfies the most the positive-norm bearings' constraint.\n t01 = ta if npos_tpos[choice] >= npos_tneg[choice] else tb\n R01 = Ra if choice == 0 else Rb\n return R01, t01, is_pure_rotation\n\n # Otherwise, select the candidate that satisfies the most the positive-norm\n # bearings' constraint.\n choice, npos = max(\n enumerate((npos_tpos[0], npos_tpos[1], npos_tneg[0], npos_tneg[1])),\n key=lambda x: x[1],\n )\n\n t01 = ta if choice < 2 else tb\n R01 = Rb if choice % 2 else Ra\n\n return R01, t01, is_pure_rotation"
}
] | from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple, Union
from nonmin_pose.constraints.constraint_manager import (
ConstraintConfig,
ConstraintManager,
)
from nonmin_pose.constraints.constraints import Parameter
from nonmin_pose.sdpa import SDPA
from nonmin_pose.utils import compute_data_matrix_C, decompose_essmat
import numpy as np | 2,817 |
class NonMinRelPoseBase(ABC):
"""Non-minimal Essential matrix estimation using SDPA solver."""
DEFAULT_CFG = {
# PARAMETER_STABLE_BUT_SLOW, PARAMETER_DEFAULT, PARAMETER_UNSTABLE_BUT_FAST
"sdpa_param_type": SDPA.PARAMETER_DEFAULT,
"th_rank_optimality": 1e-5,
"th_pure_rot_post": 1 - 1e-8, # for Zhao's and Garcia-Salguero's methods.
"th_pure_rot_sdp": 1e-3, # for C2P
"th_pure_rot_noisefree_sdp": 1e-4, # for C2P
# for computing the constraint coefficients that are determined at runtime.
"use_top_k": None,
}
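    # Set by each subclass to indicate whether the SDP solution directly yields the relative pose (otherwise it is recovered from the estimated essential matrix).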
SDP_COMPUTES_POSE: bool
def __init__(
self,
parameters: Optional[List[Parameter]] = None,
|
class NonMinRelPoseBase(ABC):
"""Non-minimal Essential matrix estimation using SDPA solver."""
DEFAULT_CFG = {
# PARAMETER_STABLE_BUT_SLOW, PARAMETER_DEFAULT, PARAMETER_UNSTABLE_BUT_FAST
"sdpa_param_type": SDPA.PARAMETER_DEFAULT,
"th_rank_optimality": 1e-5,
"th_pure_rot_post": 1 - 1e-8, # for Zhao's and Garcia-Salguero's methods.
"th_pure_rot_sdp": 1e-3, # for C2P
"th_pure_rot_noisefree_sdp": 1e-4, # for C2P
# for computing the constraint coefficients that are determined at runtime.
"use_top_k": None,
}
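    # Set by each subclass to indicate whether the SDP solution directly yields the relative pose (otherwise it is recovered from the estimated essential matrix).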
SDP_COMPUTES_POSE: bool
def __init__(
self,
parameters: Optional[List[Parameter]] = None, | constraints: Optional[ConstraintConfig] = None, | 0 | 2023-12-10 18:25:10+00:00 | 4k |
bluuewhale/nexon-openapi-python | src/nexon_openapi/utils/_transform.py | [
{
"identifier": "is_list",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)"
},
{
"identifier": "is_mapping",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]:\n return isinstance(obj, Mapping)"
},
{
"identifier": "is_list_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_list_type(typ: type) -> bool:\n return (get_origin(typ) or typ) == list"
},
{
"identifier": "is_union_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_union_type(typ: type) -> bool:\n return _is_union(get_origin(typ))"
},
{
"identifier": "extract_type_arg",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def extract_type_arg(typ: type, index: int) -> type:\n args = get_args(typ)\n try:\n return cast(type, args[index])\n except IndexError as err:\n raise RuntimeError(f\"Expected type {typ} to have a type argument at index {index} but it did not\") from err"
},
{
"identifier": "is_required_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_required_type(typ: type) -> bool:\n return get_origin(typ) == Required"
},
{
"identifier": "is_annotated_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_annotated_type(typ: type) -> bool:\n return get_origin(typ) == Annotated"
},
{
"identifier": "strip_annotated_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def strip_annotated_type(typ: type) -> type:\n if is_required_type(typ) or is_annotated_type(typ):\n return strip_annotated_type(cast(type, get_args(typ)[0]))\n\n return typ"
},
{
"identifier": "model_dump",
"path": "src/nexon_openapi/_compat.py",
"snippet": "def model_dump(\n model: pydantic.BaseModel,\n *,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n) -> Dict[str, Any]:\n if PYDANTIC_V2:\n return model.model_dump(\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n )\n return cast(\n \"dict[str, Any]\",\n model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n ),\n )"
},
{
"identifier": "is_typeddict",
"path": "src/nexon_openapi/_compat.py",
"snippet": "def is_typeddict(type_: Type[Any]) -> bool: # noqa: ARG001\n ..."
}
] | from typing import Any, Mapping, Optional, TypeVar, Union, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, override, get_type_hints
from ._utils import (
is_list,
is_mapping,
is_list_type,
is_union_type,
extract_type_arg,
is_required_type,
is_annotated_type,
strip_annotated_type,
)
from .._compat import model_dump, is_typeddict
import pydantic | 1,674 | account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
if is_annotated_type(type_):
return type_
return None
def _maybe_transform_key(key: str, type_: type) -> str:
"""Transform the given `data` based on the annotations provided in `type_`.
Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata.
"""
annotated_type = _get_annotated_type(type_)
if annotated_type is None:
# no `Annotated` definition for this type, no transformation needed
return key
# ignore the first argument as it is the actual type
annotations = get_args(annotated_type)[1:]
for annotation in annotations:
if isinstance(annotation, PropertyInfo) and annotation.alias is not None:
return annotation.alias
return key
def _transform_recursive(
data: object,
*,
annotation: type,
inner_type: Optional[type] = None,
) -> object:
"""Transform the given data against the expected type.
Args:
annotation: The direct type annotation given to the particular piece of data.
This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
the list can be transformed using the metadata from the container type.
Defaults to the same value as the `annotation` argument.
"""
if inner_type is None:
inner_type = annotation
stripped_type = strip_annotated_type(inner_type)
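    # Work with the bare type so the structural checks below (TypedDict, list, ...) see through Annotated/Required wrappers.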
if is_typeddict(stripped_type) and is_mapping(data):
return _transform_typeddict(data, stripped_type)
| from __future__ import annotations
_T = TypeVar("_T")
PropertyFormat = Literal["iso8601", "custom"]
class PropertyInfo:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
if is_annotated_type(type_):
return type_
return None
def _maybe_transform_key(key: str, type_: type) -> str:
"""Transform the given `data` based on the annotations provided in `type_`.
Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata.
"""
annotated_type = _get_annotated_type(type_)
if annotated_type is None:
# no `Annotated` definition for this type, no transformation needed
return key
# ignore the first argument as it is the actual type
annotations = get_args(annotated_type)[1:]
for annotation in annotations:
if isinstance(annotation, PropertyInfo) and annotation.alias is not None:
return annotation.alias
return key
def _transform_recursive(
data: object,
*,
annotation: type,
inner_type: Optional[type] = None,
) -> object:
"""Transform the given data against the expected type.
Args:
annotation: The direct type annotation given to the particular piece of data.
This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
the list can be transformed using the metadata from the container type.
Defaults to the same value as the `annotation` argument.
"""
if inner_type is None:
inner_type = annotation
stripped_type = strip_annotated_type(inner_type)
if is_typeddict(stripped_type) and is_mapping(data):
return _transform_typeddict(data, stripped_type)
| if is_list_type(stripped_type) and is_list(data): | 2 | 2023-12-14 18:12:17+00:00 | 4k |
Jack24658735/FedLGT | models/CTran.py | [
{
"identifier": "SelfAttnLayer",
"path": "models/transformer_layers.py",
"snippet": "class SelfAttnLayer(nn.Module):\n def __init__(self, d_model, nhead = 4,dropout=0.1):\n super().__init__()\n self.transformer_layer = TransformerEncoderLayer(d_model, nhead, d_model*1, dropout=dropout, activation='relu')\n # self.transformer_layer = nn.TransformerEncoderLayer(d_model, nhead, d_model, dropout=dropout, activation='gelu') \n\n def forward(self,k,mask=None):\n attn = None\n k=k.transpose(0,1) \n x,attn = self.transformer_layer(k,src_mask=mask)\n # x = self.transformer_layer(k,src_mask=mask)\n x=x.transpose(0,1)\n return x,attn"
},
{
"identifier": "Backbone",
"path": "models/backbone.py",
"snippet": "class Backbone(nn.Module):\n def __init__(self):\n super(Backbone, self).__init__()\n embedding_dim = 512\n self.freeze_base = False\n self.freeze_base4 = False\n\n # self.base_network = models.resnet101(pretrained=True)\n self.base_network = models.resnet18(pretrained=True)\n # self.base_network = models.resnet50(pretrained=True)\n\n # self.base_network.avgpool = nn.AvgPool2d(kernel_size=7,stride=1,padding=0) # replace avg pool\n # self.base_network.avgpool = nn.AvgPool2d(2,stride=2) # replace avg pool\n\n if self.freeze_base:\n for param in self.base_network.parameters():\n param.requires_grad = False\n elif self.freeze_base4:\n for p in self.base_network.layer4.parameters(): \n p.requires_grad=True\n\n def forward(self,images):\n x = self.base_network.conv1(images)\n x = self.base_network.bn1(x)\n x = self.base_network.relu(x)\n x = self.base_network.maxpool(x)\n x = self.base_network.layer1(x)\n x = self.base_network.layer2(x)\n x = self.base_network.layer3(x)\n x = self.base_network.layer4(x)\n # x = self.base_network.avgpool(x)\n \n return x"
},
{
"identifier": "BackboneCLIP",
"path": "models/backbone.py",
"snippet": "class BackboneCLIP(nn.Module):\n def __init__(self, model=None):\n super(BackboneCLIP, self).__init__()\n # self.base_network = models.resnet101(pretrained=True)\n # self.base_network = models.resnet18(pretrained=True)\n # model, _ = clip.load(\"RN50\")\n # print()\n model, _ = clip.load(\"ViT-B/16\", device='cuda')\n self.base_network = model.visual\n for param in self.base_network.parameters():\n param.requires_grad = False\n # self.base_network.avgpool = nn.AvgPool2d(kernel_size=7,stride=1,padding=0) # replace avg pool\n # self.base_network.avgpool = nn.AvgPool2d(2,stride=2) # replace avg pool\n\n # print(self.base_network)\n # if self.freeze_base:\n # for param in self.base_network.parameters():\n # param.requires_grad = False\n # elif self.freeze_base4:\n # for p in self.base_network.layer4.parameters(): \n # p.requires_grad=True\n\n def forward(self, x: torch.Tensor):\n x = self.base_network.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat([self.base_network.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.base_network.positional_embedding.to(x.dtype)\n x = self.base_network.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.base_network.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # x = self.base_network.ln_post(x[:, :, :])\n x = self.base_network.ln_post(x[:, 0, :])\n\n if self.base_network.proj is not None:\n x = x @ self.base_network.proj\n\n return x"
},
{
"identifier": "custom_replace",
"path": "models/utils.py",
"snippet": "def custom_replace(tensor,on_neg_1,on_zero,on_one):\n res = tensor.clone()\n res[tensor==-1] = on_neg_1\n res[tensor==0] = on_zero\n res[tensor==1] = on_one\n return res"
},
{
"identifier": "weights_init",
"path": "models/utils.py",
"snippet": "def weights_init(module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n stdv = 1. / math.sqrt(module.weight.size(1))\n module.weight.data.uniform_(-stdv, stdv)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.uniform_(-stdv, stdv)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)"
},
{
"identifier": "PositionEmbeddingSine",
"path": "models/position_enc.py",
"snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n super().__init__()\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n self.scale = scale\n\n def forward(self, x, mask):\n # x = tensor_list.tensors\n # mask = tensor_list.mask\n assert mask is not None\n not_mask = ~mask\n # stop()\n y_embed = not_mask.cumsum(1)#, dtype=torch.float32)\n x_embed = not_mask.cumsum(2)#, dtype=torch.float32)\n if self.normalize:\n eps = 1e-6\n y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n\n dim_t = torch.arange(self.num_pos_feats, device=x.device)#, dtype=torch.float32)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n # stop()\n \n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos"
},
{
"identifier": "positionalencoding2d",
"path": "models/position_enc.py",
"snippet": "def positionalencoding2d(d_model, height, width):\n \"\"\"\n :param d_model: dimension of the model\n :param height: height of the positions\n :param width: width of the positions\n :return: d_model*height*width position matrix\n \"\"\"\n if d_model % 4 != 0:\n raise ValueError(\"Cannot use sin/cos positional encoding with \"\n \"odd dimension (got dim={:d})\".format(d_model))\n pe = torch.zeros(d_model, height, width)\n # Each dimension use half of d_model\n d_model = int(d_model / 2)\n div_term = torch.exp(torch.arange(0., d_model, 2) *\n -(math.log(10000.0) / d_model))\n pos_w = torch.arange(0., width).unsqueeze(1)\n pos_h = torch.arange(0., height).unsqueeze(1)\n pe[0:d_model:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)\n pe[1:d_model:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)\n pe[d_model::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)\n pe[d_model + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)\n\n return pe"
},
{
"identifier": "MLDecoder",
"path": "models/ml_decoder.py",
"snippet": "class MLDecoder(nn.Module):\n def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768,\n initial_num_features=2048, zsl=0):\n super(MLDecoder, self).__init__()\n embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups\n if embed_len_decoder > num_classes:\n embed_len_decoder = num_classes\n\n # switching to 768 initial embeddings\n decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding\n embed_standart = nn.Linear(initial_num_features, decoder_embedding)\n\n # non-learnable queries\n if not zsl:\n query_embed = nn.Embedding(embed_len_decoder, decoder_embedding)\n query_embed.requires_grad_(False)\n else:\n query_embed = None\n\n # decoder\n decoder_dropout = 0.1\n num_layers_decoder = 1\n dim_feedforward = 2048\n layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding,\n dim_feedforward=dim_feedforward, dropout=decoder_dropout)\n self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder)\n self.decoder.embed_standart = embed_standart\n self.decoder.query_embed = query_embed\n self.zsl = zsl\n\n if self.zsl:\n if decoder_embedding != 300:\n self.wordvec_proj = nn.Linear(300, decoder_embedding)\n else:\n self.wordvec_proj = nn.Identity()\n self.decoder.duplicate_pooling = torch.nn.Parameter(torch.Tensor(decoder_embedding, 1))\n self.decoder.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(1))\n self.decoder.duplicate_factor = 1\n else:\n # group fully-connected\n self.decoder.num_classes = num_classes\n self.decoder.duplicate_factor = int(num_classes / embed_len_decoder + 0.999)\n self.decoder.duplicate_pooling = torch.nn.Parameter(\n torch.Tensor(embed_len_decoder, decoder_embedding, self.decoder.duplicate_factor))\n self.decoder.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes))\n torch.nn.init.xavier_normal_(self.decoder.duplicate_pooling)\n torch.nn.init.constant_(self.decoder.duplicate_pooling_bias, 0)\n self.decoder.group_fc = GroupFC(embed_len_decoder)\n self.train_wordvecs = None\n self.test_wordvecs = None\n\n def forward(self, x, q):\n if len(x.shape) == 4: # [bs,2048, 7,7]\n embedding_spatial = x.flatten(2).transpose(1, 2)\n else: # [bs, 197,468]\n embedding_spatial = x\n embedding_spatial_786 = self.decoder.embed_standart(embedding_spatial)\n embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True)\n\n bs = embedding_spatial_786.shape[0]\n if self.zsl:\n query_embed = torch.nn.functional.relu(self.wordvec_proj(self.decoder.query_embed))\n else:\n # query_embed = self.decoder.query_embed.weight\n query_embed = q\n # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1)\n # tgt = query_embed.unsqueeze(1).expand(-1, 1, -1) # no allocation of memory with expand\n # print(query_embed.shape)\n tgt = query_embed.transpose(1,0).float()\n h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768]\n h = h.transpose(0, 1)\n\n out_extrap = torch.zeros(h.shape[0], h.shape[1], self.decoder.duplicate_factor, device=h.device, dtype=h.dtype)\n self.decoder.group_fc(h, self.decoder.duplicate_pooling, out_extrap)\n if not self.zsl:\n h_out = out_extrap.flatten(1)[:, :self.decoder.num_classes]\n else:\n h_out = out_extrap.flatten(1)\n h_out += self.decoder.duplicate_pooling_bias\n logits = h_out\n return logits"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .transformer_layers import SelfAttnLayer
from .backbone import Backbone, BackboneCLIP
from .utils import custom_replace,weights_init
from .position_enc import PositionEmbeddingSine,positionalencoding2d
from .ml_decoder import MLDecoder | 3,382 |
class CTranModel(nn.Module):
def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):
super(CTranModel, self).__init__()
self.use_lmt = use_lmt
self.no_x_features = no_x_features # (for no image features)
# ResNet backbone
|
class CTranModel(nn.Module):
def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):
super(CTranModel, self).__init__()
self.use_lmt = use_lmt
self.no_x_features = no_x_features # (for no image features)
# ResNet backbone | self.backbone = Backbone() | 1 | 2023-12-09 09:16:59+00:00 | 4k |
AgriCodeHub/dairy-django-backend | tests/core/tests/conftest.py | [
{
"identifier": "CowAvailabilityChoices",
"path": "core/choices.py",
"snippet": "class CowAvailabilityChoices(models.TextChoices):\n \"\"\"\n Choices for the availability status of a cow.\n\n Choices:\n - `ALIVE`: Cow is alive and active.\n - `SOLD`: Cow has been sold.\n - `DEAD`: Cow has died.\n\n Usage:\n These choices represent the availability status of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the current status of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n availability_status = models.CharField(max_length=50, choices=CowAvailabilityChoices.choices)\n ```\n \"\"\"\n\n ALIVE = \"Alive\"\n SOLD = \"Sold\"\n DEAD = \"Dead\""
},
{
"identifier": "CowBreedChoices",
"path": "core/choices.py",
"snippet": "class CowBreedChoices(models.TextChoices):\n \"\"\"\n Enumeration of choices for representing different cow breeds.\n\n Choices:\n - `FRIESIAN`: Represents the Friesian cow breed.\n - `SAHIWAL`: Represents the Sahiwal cow breed.\n - `JERSEY`: Represents the Jersey cow breed.\n - `GUERNSEY`: Represents the Guernsey cow breed.\n - `CROSSBREED`: Represents a crossbreed of cows.\n - `AYRSHIRE`: Represents the Ayrshire cow breed.\n\n Usage:\n This enumeration provides predefined choices for the cow breed field in the CowBreed model.\n Use these choices when defining or querying CowBreed instances to represent specific cow breeds.\n\n Example:\n ```\n class CowBreed(models.Model):\n name = models.CharField(max_length=50, choices=CowBreedChoices.choices)\n ```\n\n \"\"\"\n\n FRIESIAN = \"Friesian\"\n SAHIWAL = \"Sahiwal\"\n JERSEY = \"Jersey\"\n GUERNSEY = \"Guernsey\"\n CROSSBREED = \"Crossbreed\"\n AYRSHIRE = \"Ayrshire\""
},
{
"identifier": "CowCategoryChoices",
"path": "core/choices.py",
"snippet": "class CowCategoryChoices(models.TextChoices):\n \"\"\"\n Choices for the category of a cow.\n\n Choices:\n - `CALF`: Represents a calf.\n - `WEANER`: Represents a weaner.\n - `HEIFER`: Represents a heifer.\n - `BULL`: Represents a bull.\n - `MILKING_COW`: Represents a milking cow.\n\n Usage:\n These choices represent the category of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the category of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n category = models.CharField(max_length=15, choices=CowCategoryChoices.choices)\n ```\n \"\"\"\n\n CALF = \"Calf\"\n WEANER = \"Weaner\"\n HEIFER = \"Heifer\"\n BULL = \"Bull\"\n MILKING_COW = \"Milking Cow\""
},
{
"identifier": "CowPregnancyChoices",
"path": "core/choices.py",
"snippet": "class CowPregnancyChoices(models.TextChoices):\n \"\"\"\n Choices for the pregnancy status of a cow.\n\n Choices:\n - `OPEN`: Cow is not pregnant.\n - `PREGNANT`: Cow is pregnant.\n - `CALVED`: Cow has calved.\n - `UNAVAILABLE`: Cow cannot have pregnancy status.\n\n Usage:\n These choices represent the pregnancy status of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the current pregnancy status of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n current_pregnancy_status = models.CharField(max_length=15, choices=CowPregnancyChoices.choices)\n ```\n \"\"\"\n\n OPEN = \"Open\"\n PREGNANT = \"Pregnant\"\n CALVED = \"Calved\"\n UNAVAILABLE = \"Unavailable\""
},
{
"identifier": "CowProductionStatusChoices",
"path": "core/choices.py",
"snippet": "class CowProductionStatusChoices(models.TextChoices):\n \"\"\"\n Choices for the production status of a cow.\n\n Choices:\n - `OPEN`: Cow is open (not pregnant or lactating).\n - `PREGNANT_NOT_LACTATING`: Cow is pregnant but not lactating.\n - `PREGNANT_AND_LACTATING`: Cow is pregnant and lactating.\n - `DRY`: Cow is dry (not lactating).\n - `CULLED`: Cow has been culled.\n - `QUARANTINED`: Cow is quarantined.\n - `BULL`: Represents a bull.\n - `YOUNG_BULL`: Represents a young bull.\n - `YOUNG_HEIFER`: Represents a young heifer.\n - `MATURE_BULL`: Represents a mature bull.\n - `CALF`: Represents a calf.\n - `WEANER`: Represents a weaner.\n\n Usage:\n These choices represent the production status of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the current production status of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n current_production_status = models.CharField(max_length=15, choices=CowProductionStatusChoices.choices)\n ```\n \"\"\"\n\n OPEN = \"Open\"\n PREGNANT_NOT_LACTATING = \"Pregnant not Lactating\"\n PREGNANT_AND_LACTATING = \"Pregnant and Lactating\"\n DRY = \"Dry\"\n CULLED = \"Culled\"\n QUARANTINED = \"Quarantined\"\n BULL = \"Bull\"\n YOUNG_BULL = \"Young Bull\"\n YOUNG_HEIFER = \"Young Heifer\"\n MATURE_BULL = \"Mature Bull\"\n CALF = \"Calf\"\n WEANER = \"Weaner\""
},
{
"identifier": "SexChoices",
"path": "users/choices.py",
"snippet": "class SexChoices(models.TextChoices):\n MALE = \"Male\"\n FEMALE = \"Female\""
},
{
"identifier": "todays_date",
"path": "core/utils.py",
"snippet": ""
}
] | from datetime import timedelta
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.choices import (
CowAvailabilityChoices,
CowBreedChoices,
CowCategoryChoices,
CowPregnancyChoices,
CowProductionStatusChoices,
)
from users.choices import SexChoices
from core.utils import todays_date
import pytest | 2,540 | "is_farm_owner": True,
}
farm_owner_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_owner_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_owner_login_data)
farm_owner_token = response.data["auth_token"]
# Create farm manager user
farm_manager_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Manager",
"phone_number": "+254755555555",
"sex": SexChoices.MALE,
"is_farm_manager": True,
}
farm_manager_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_manager_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_manager_login_data)
farm_manager_token = response.data["auth_token"]
# Create assistant farm manager user
asst_farm_manager_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Assistant",
"last_name": "Farm Manager",
"phone_number": "+254744444444",
"sex": SexChoices.FEMALE,
"is_assistant_farm_manager": True,
}
asst_farm_manager_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", asst_farm_manager_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), asst_farm_manager_login_data)
asst_farm_manager_token = response.data["auth_token"]
# Create team leader user
team_leader_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Team",
"last_name": "Leader",
"phone_number": "+254733333333",
"sex": SexChoices.MALE,
"is_team_leader": True,
}
team_leader_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", team_leader_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), team_leader_login_data)
assert response.status_code == status.HTTP_200_OK
team_leader_token = response.data["auth_token"]
# Create farm worker user
farm_worker_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Worker",
"phone_number": "+254722222222",
"sex": SexChoices.FEMALE,
"is_farm_worker": True,
}
farm_worker_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_worker_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_worker_login_data)
farm_worker_token = response.data["auth_token"]
return {
"client": client,
"farm_owner_token": farm_owner_token,
"farm_manager_token": farm_manager_token,
"asst_farm_manager_token": asst_farm_manager_token,
"team_leader_token": team_leader_token,
"farm_worker_token": farm_worker_token,
}
@pytest.fixture
def setup_cows():
"""
    Fixture to create a sample cow object for testing.
"""
general_cow = {
"name": "General Cow",
"breed": {"name": CowBreedChoices.JERSEY},
"date_of_birth": todays_date - timedelta(days=370),
"gender": SexChoices.FEMALE,
"availability_status": CowAvailabilityChoices.ALIVE,
"current_pregnancy_status": CowPregnancyChoices.OPEN,
|
@pytest.fixture()
@pytest.mark.django_db
def setup_users():
client = APIClient()
# Create farm owner user
farm_owner_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Owner",
"phone_number": "+254787654321",
"sex": SexChoices.MALE,
"is_farm_owner": True,
}
farm_owner_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_owner_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_owner_login_data)
farm_owner_token = response.data["auth_token"]
# Create farm manager user
farm_manager_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Manager",
"phone_number": "+254755555555",
"sex": SexChoices.MALE,
"is_farm_manager": True,
}
farm_manager_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_manager_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_manager_login_data)
farm_manager_token = response.data["auth_token"]
# Create assistant farm manager user
asst_farm_manager_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Assistant",
"last_name": "Farm Manager",
"phone_number": "+254744444444",
"sex": SexChoices.FEMALE,
"is_assistant_farm_manager": True,
}
asst_farm_manager_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", asst_farm_manager_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), asst_farm_manager_login_data)
asst_farm_manager_token = response.data["auth_token"]
# Create team leader user
team_leader_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Team",
"last_name": "Leader",
"phone_number": "+254733333333",
"sex": SexChoices.MALE,
"is_team_leader": True,
}
team_leader_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", team_leader_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), team_leader_login_data)
assert response.status_code == status.HTTP_200_OK
team_leader_token = response.data["auth_token"]
# Create farm worker user
farm_worker_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Worker",
"phone_number": "+254722222222",
"sex": SexChoices.FEMALE,
"is_farm_worker": True,
}
farm_worker_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_worker_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_worker_login_data)
farm_worker_token = response.data["auth_token"]
return {
"client": client,
"farm_owner_token": farm_owner_token,
"farm_manager_token": farm_manager_token,
"asst_farm_manager_token": asst_farm_manager_token,
"team_leader_token": team_leader_token,
"farm_worker_token": farm_worker_token,
}
@pytest.fixture
def setup_cows():
"""
    Fixture to create a sample cow object for testing.
"""
general_cow = {
"name": "General Cow",
"breed": {"name": CowBreedChoices.JERSEY},
"date_of_birth": todays_date - timedelta(days=370),
"gender": SexChoices.FEMALE,
"availability_status": CowAvailabilityChoices.ALIVE,
"current_pregnancy_status": CowPregnancyChoices.OPEN, | "category": CowCategoryChoices.HEIFER, | 2 | 2023-12-09 06:56:42+00:00 | 4k |
facebookresearch/chat2map-official | habitat_audio/simulator.py | [
{
"identifier": "load_points_data",
"path": "habitat_audio/utils.py",
"snippet": "def load_points_data(parent_folder, graph_file, transform=True, scene_dataset=\"replica\"):\n \"\"\"\n Main method to load points data from files stored on disk and transform if necessary\n :param parent_folder: parent folder containing files with points data\n :param graph_file: files containing connectivity of points per scene\n :param transform: transform coordinate systems of loaded points for use in Habitat or not\n :param scene_dataset: name of scenes dataset (\"replica\", \"mp3d\", etc.)\n :return: 1. points in transformed coordinate system for use with Habitat\n 2. graph object containing information about the connectivity of points in a scene\n \"\"\"\n points_file = os.path.join(parent_folder, 'points.txt')\n graph_file = os.path.join(parent_folder, graph_file)\n\n points = None\n if os.path.isfile(points_file): \n _, points = load_points(points_file, transform=transform, scene_dataset=scene_dataset)\n if not os.path.exists(graph_file):\n raise FileExistsError(graph_file + ' does not exist!')\n else:\n with open(graph_file, 'rb') as fo:\n graph = pickle.load(fo)\n\n return points, graph"
},
{
"identifier": "_to_tensor",
"path": "habitat_audio/utils.py",
"snippet": "def _to_tensor(v):\n if torch.is_tensor(v):\n return v\n elif isinstance(v, np.ndarray):\n return torch.from_numpy(v)\n else:\n return torch.tensor(v, dtype=torch.float)"
}
] | from typing import List
from collections import defaultdict
from scipy.io import wavfile
from scipy.signal import fftconvolve
from habitat.core.registry import registry
from habitat.tasks.utils import (
cartesian_to_polar,
quaternion_from_coeff,
quaternion_rotate_vector,
)
from habitat_sim.utils.common import quat_from_angle_axis, quat_from_coeffs, quat_to_angle_axis
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.core.simulator import (Config, AgentState, ShortestPathPoint)
from habitat_audio.utils import load_points_data, _to_tensor
import logging
import pickle
import os
import cv2
import torch
import librosa
import scipy
import numba
import numpy as np
import networkx as nx
import habitat_sim | 3,596 | self.rir_sampling_rate = self.audio_cfg.RIR_SAMPLING_RATE
self._max_valid_impulse_length = self.audio_cfg.MAX_VALID_IMPULSE_LENGTH_AFTER_REMOVING_LEADING_ZEROS
self.hop_length = self.audio_cfg.HOP_LENGTH
self.n_fft = self.audio_cfg.N_FFT
self.win_length = self.audio_cfg.WIN_LENGTH
self._anechoic_audio_slice_length = self.audio_cfg.ANECHOIC_AUDIO_SLICE_LENGTH
self._audio_wav_shape = self.task_cfg.CONTEXT_SELF_AUDIO_SENSOR.FEATURE_SHAPE
print(f"LOADING ANECHOIC AUDIO FOR train")
anechoic_audio_dir = self.audio_cfg.ANECHOIC_DIR
assert os.path.isdir(anechoic_audio_dir)
anechoic_audio_filenames = os.listdir(anechoic_audio_dir)
self._anechoic_filename_2_audioData = {}
for anechoic_audio_filename in anechoic_audio_filenames:
anechoic_audio_filePath = os.path.join(anechoic_audio_dir, anechoic_audio_filename)
assert os.path.isfile(anechoic_audio_filePath)
anechoic_audioSR, anechoic_audioData = wavfile.read(anechoic_audio_filePath)
assert anechoic_audioSR == self.rir_sampling_rate
assert anechoic_audio_filename.split(".")[0] not in self._anechoic_filename_2_audioData
self._anechoic_filename_2_audioData[anechoic_audio_filename.split(".")[0]] = anechoic_audioData
assert "CONTEXT_VIEW_POSE_SENSOR" in self.task_cfg.SENSORS
self._pose_feat_shape = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.FEATURE_SHAPE
self._add_truncated_gaussian_pose_noise = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.ADD_TRUNCATED_GAUSSIAN_NOISE
self._truncated_gaussian_pose_noise_cfg = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.TRUNCATED_GAUSSIAN_NOISE
# self._truncated_gaussian_pose_noise_random_multipliers = None
self._gaussian_pose_noise_multipliers = None
if self._add_truncated_gaussian_pose_noise:
assert os.path.isfile(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH)
with open(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH, "rb") as fi:
self._gaussian_pose_noise_multipliers = pickle.load(fi)
self.max_context_length = self.env_cfg.MAX_CONTEXT_LENGTH
self.visual_budget = self.env_cfg.VISUAL_BUDGET
self.max_query_length = self.env_cfg.MAX_QUERY_LENGTH
assert self.max_query_length == (self.config.ALL_AGENTS.NUM * self.max_context_length)
self.render_local_ego_occ_maps_from_depth_images = self.config.RENDER_LOCAL_EGO_OCC_MAPS_FROM_DEPTH_IMAGES
self.local_occMap_cfg = self.config.LOCAL_MAP
self.ego_mapper = None
self.redwood_depth_noise_dist_model = None
self.redwood_depth_noise_multiplier = None
if self.render_local_ego_occ_maps_from_depth_images:
self.ego_mapper = EgoMap(
map_size=self.local_occMap_cfg.SIZE,
map_scale=self.local_occMap_cfg.SCALE,
position=self.local_occMap_cfg.AGENT_POSITION,
depth_sensor_hfov=self.local_occMap_cfg.HFOV_DEPTH_IMG,
height_thresh=self.local_occMap_cfg.HEIGHT_THRESH,
depth_sensor_min_depth=self.local_occMap_cfg.MIN_DEPTH,
depth_sensor_max_depth=self.local_occMap_cfg.MAX_DEPTH,
depth_sensor_width=self.local_occMap_cfg.WIDTH_DEPTH_IMG,
depth_sensor_height=self.local_occMap_cfg.HEIGHT_DEPTH_IMG,
depth_sensor_normalize_depth=self.local_occMap_cfg.NORMALIZE_DEPTH_IMG,
)
if self.config.DEPTH_SENSOR.ADD_REDWOOD_NOISE:
"""src: https://github.com/facebookresearch/habitat-sim/blob/main/src_python/habitat_sim/sensors/noise_models/redwood_depth_noise_model.py"""
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = np.load(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = self.redwood_depth_noise_dist_model.reshape(80, 80, 5)
self.redwood_depth_noise_multiplier = self.config.DEPTH_SENSOR.REDWOOD_NOISE_MULTIPLIER
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH)
with open(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH, "rb") as fi:
self._redwood_depth_noise_rand_nums = pickle.load(fi)
self.stitch_top_down_maps = self.config.STITCH_TOP_DOWN_MAPS
self.rir_dir = self.audio_cfg.RIR_DIR
assert os.path.isdir(self.rir_dir)
self.num_agents = self.config.ALL_AGENTS.NUM
assert self.num_agents == 2
self.total_context_length = None
self.agent_utterance_allSwitches = None
self.lst_anechoicAudio_filenameNstartSamplingIdx = None
self.used_query_nodsNrots = None
self._current_context_rgb = None
self._current_context_ego_local_map = None
self._current_context_view_pose = None
self._current_context_view_rAz = None
self._previous_context_view_mask = None
self._current_context_selfAudio = None
self._current_context_otherAudio = None
self._current_context_otherAudio_pose = None
self._current_context_audio_mask = None
self._all_context_audio_mask = None
self._current_query_globCanMapEgoCrop_gt = None
self._current_query_globCanMapEgoCrop_gt_exploredPartMask = None
self._current_query_mask = None
self._all_query_mask = None
if self.stitch_top_down_maps:
self._current_stitched_query_globCanMapEgoCrop_gt = None
assert self.config.SCENE_DATASET in ["mp3d"],\
"SCENE_DATASET needs to be in ['mp3d']"
self._previous_receiver_position_indexs = [None] * self.num_agents
self._current_receiver_position_indexs = [None] * self.num_agents
self._previous_rotation_angles = [None] * self.num_agents
self._current_rotation_angles = [None] * self.num_agents
self._frame_cache = defaultdict(dict)
self._episode_count = 0
self._step_count = 0
self._view_count = self.num_agents
self._action = 1
self._is_episode_active = None
self._previous_step_collideds = [None] * self.num_agents
self._nodes_n_azimuths_lists = [None] * self.num_agents
self._position_to_index_mapping = dict()
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
EPS = 1e-8
SCENE_NAME_TO_IDX = {
"mp3d":
{'sT4fr6TAbpF': 0, 'E9uDoFAP3SH': 1, 'VzqfbhrpDEA': 2, 'kEZ7cmS4wCh': 3, '29hnd4uzFmX': 4, 'ac26ZMwG7aT': 5,
's8pcmisQ38h': 6, 'rPc6DW4iMge': 7, 'EDJbREhghzL': 8, 'mJXqzFtmKg4': 9, 'B6ByNegPMKs': 10, 'JeFG25nYj2p': 11,
'82sE5b5pLXE': 12, 'D7N2EKCX4Sj': 13, '7y3sRwLe3Va': 14, '5LpN3gDmAk7': 15, 'gTV8FGcVJC9': 16, 'ur6pFq6Qu1A': 17,
'qoiz87JEwZ2': 18, 'PuKPg4mmafe': 19, 'VLzqgDo317F': 20, 'aayBHfsNo7d': 21, 'JmbYfDe2QKZ': 22, 'XcA2TqTSSAj': 23,
'8WUmhLawc2A': 24, 'sKLMLpTHeUy': 25, 'r47D5H71a5s': 26, 'Uxmj2M2itWa': 27, 'Pm6F8kyY3z2': 28, 'p5wJjkQkbXX': 29,
'759xd9YjKW5': 30, 'JF19kD82Mey': 31, 'V2XKFyX4ASd': 32, '1LXtFkjw3qL': 33, '17DRP5sb8fy': 34, '5q7pvUzZiYa': 35,
'VVfe2KiqLaN': 36, 'Vvot9Ly1tCj': 37, 'ULsKaCPVFJR': 38, 'D7G3Y4RVNrH': 39, 'uNb9QFRL6hY': 40, 'ZMojNkEp431': 41,
'2n8kARJN3HM': 42, 'vyrNrziPKCB': 43, 'e9zR4mvMWw7': 44, 'r1Q1Z4BcV1o': 45, 'PX4nDJXEHrG': 46, 'YmJkqBEsHnH': 47,
'b8cTxDM8gDG': 48, 'GdvgFV5R1Z5': 49, 'pRbA3pwrgk9': 50, 'jh4fc5c5qoQ': 51, '1pXnuDYAj8r': 52, 'S9hNv5qa7GM': 53,
'VFuaQ6m2Qom': 54, 'cV4RVeZvu5T': 55, 'SN83YJsR3w2': 56, '2azQ1b91cZZ': 57, '5ZKStnWn8Zo': 58, '8194nk5LbLH': 59,
'ARNzJeq3xxb': 60, 'EU6Fwq7SyZv': 61, 'QUCTc6BB5sX': 62, 'TbHJrupSAjP': 63, 'UwV83HsGsw3': 64, 'Vt2qJdWjCF2': 65,
'WYY7iVyf5p8': 66, 'X7HyMhZNoso': 67, 'YFuZgdQ5vWj': 68, 'Z6MFQCViBuw': 69, 'fzynW3qQPVF': 70, 'gYvKGZ5eRqb': 71,
'gxdoqLR6rwA': 72, 'jtcxE69GiFV': 73, 'oLBMNvg9in8': 74, 'pLe4wQe7qrG': 75, 'pa4otMbVnkk': 76, 'q9vSo1VnCiC': 77,
'rqfALeAoiTq': 78, 'wc2JMjhGNzB': 79, 'x8F5xyUWy9e': 80, 'yqstnuAEVhm': 81, 'zsNo4HB9uLZ': 82},
}
SCENE_SPLITS = {
"mp3d":
{
"train": ['sT4fr6TAbpF', 'E9uDoFAP3SH', 'VzqfbhrpDEA', 'kEZ7cmS4wCh', '29hnd4uzFmX',
'ac26ZMwG7aT', 's8pcmisQ38h', 'rPc6DW4iMge', 'EDJbREhghzL', 'mJXqzFtmKg4',
'B6ByNegPMKs', 'JeFG25nYj2p', '82sE5b5pLXE', 'D7N2EKCX4Sj', '7y3sRwLe3Va',
'5LpN3gDmAk7', 'gTV8FGcVJC9', 'ur6pFq6Qu1A', 'qoiz87JEwZ2', 'PuKPg4mmafe',
'VLzqgDo317F', 'aayBHfsNo7d', 'JmbYfDe2QKZ', 'XcA2TqTSSAj', '8WUmhLawc2A',
'sKLMLpTHeUy', 'r47D5H71a5s', 'Uxmj2M2itWa', 'Pm6F8kyY3z2', 'p5wJjkQkbXX',
'759xd9YjKW5', 'JF19kD82Mey', 'V2XKFyX4ASd', '1LXtFkjw3qL', '17DRP5sb8fy',
'5q7pvUzZiYa', 'VVfe2KiqLaN', 'Vvot9Ly1tCj', 'ULsKaCPVFJR', 'D7G3Y4RVNrH',
'uNb9QFRL6hY', 'ZMojNkEp431', '2n8kARJN3HM', 'vyrNrziPKCB', 'e9zR4mvMWw7',
'r1Q1Z4BcV1o', 'PX4nDJXEHrG', 'YmJkqBEsHnH', 'b8cTxDM8gDG', 'GdvgFV5R1Z5',
'pRbA3pwrgk9', 'jh4fc5c5qoQ', '1pXnuDYAj8r', 'S9hNv5qa7GM', 'VFuaQ6m2Qom',
'cV4RVeZvu5T', 'SN83YJsR3w2', ],
"val": ['QUCTc6BB5sX', 'EU6Fwq7SyZv', '2azQ1b91cZZ', 'Z6MFQCViBuw', 'pLe4wQe7qrG', 'oLBMNvg9in8',
'X7HyMhZNoso', 'zsNo4HB9uLZ', 'TbHJrupSAjP', '8194nk5LbLH', ],
"test": ['pa4otMbVnkk', 'yqstnuAEVhm', '5ZKStnWn8Zo', 'Vt2qJdWjCF2', 'wc2JMjhGNzB', 'fzynW3qQPVF',
'UwV83HsGsw3', 'q9vSo1VnCiC', 'ARNzJeq3xxb', 'gYvKGZ5eRqb', 'jtcxE69GiFV', 'gxdoqLR6rwA',
'WYY7iVyf5p8', 'YFuZgdQ5vWj', 'rqfALeAoiTq', 'x8F5xyUWy9e',]
},
}
ALL_AZIMUTHS = [0, 90, 180, 270]
def asnumpy(v):
if torch.is_tensor(v):
return v.cpu().numpy()
elif isinstance(v, np.ndarray):
return v
else:
raise ValueError('Invalid input')
# Read about the noise model here: http://www.alexteichman.com/octo/clams/
# Original source code: http://redwood-data.org/indoor/data/simdepth.py
@numba.jit(nopython=True, fastmath=True)
def undistort_redwood_depth_noise(x, y, z, model):
i2 = int((z + 1) / 2)
i1 = int(i2 - 1)
a = (z - (i1 * 2.0 + 1.0)) / 2.0
x = x // 8
y = y // 6
f = (1.0 - a) * model[y, x, min(max(i1, 0), 4)] + a * model[y, x, min(i2, 4)]
if f < 1e-5:
return 0.0
else:
return z / f
@numba.jit(nopython=True, parallel=True, fastmath=True)
def simulate_redwood_depth_noise(gt_depth, model, noise_multiplier, rand_nums):
noisy_depth = np.empty_like(gt_depth)
H, W = gt_depth.shape
ymax, xmax = H - 1.0, W - 1.0
# rand_nums = np.random.randn(H, W, 3).astype(np.float32)
# Parallelize just the outer loop. This doesn't change the speed
    # noticeably but reduces CPU usage compared to two parallel loops
for j in numba.prange(H):
for i in range(W):
y = int(
min(max(j + rand_nums[j, i, 0] * 0.25 * noise_multiplier, 0.0), ymax)
+ 0.5
)
x = int(
min(max(i + rand_nums[j, i, 1] * 0.25 * noise_multiplier, 0.0), xmax)
+ 0.5
)
# Downsample
d = gt_depth[y - y % 2, x - x % 2]
# If the depth is greater than 10, the sensor will just return 0
if d >= 10.0:
noisy_depth[j, i] = 0.0
else:
# Distort
# The noise model was originally made for a 640x480 sensor,
# so re-map our arbitrarily sized sensor to that size!
undistorted_d = undistort_redwood_depth_noise(
int(x / xmax * 639.0 + 0.5), int(y / ymax * 479.0 + 0.5), d, model
)
if undistorted_d == 0.0:
noisy_depth[j, i] = 0.0
else:
denom = round(
(
35.130 / undistorted_d
+ rand_nums[j, i, 2] * 0.027778 * noise_multiplier
)
* 8.0
)
if denom <= 1e-5:
noisy_depth[j, i] = 0.0
else:
noisy_depth[j, i] = 35.130 * 8.0 / denom
return noisy_depth
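# Illustrative sketch (added for clarity, not part of the original file): a hedged example
# of driving the Redwood depth-noise simulation above on a single synthetic frame. The
# all-ones stand-in distortion model, frame size and depth value are assumptions; the
# simulator itself loads the real Redwood model and pre-generated random numbers from its config.
def _example_redwood_noise() -> np.ndarray:
    H, W = 128, 128
    gt_depth = np.full((H, W), 2.5, dtype=np.float32)         # flat scene 2.5 m away
    stand_in_model = np.ones((80, 80, 5), dtype=np.float32)   # placeholder for the (80, 80, 5) Redwood model
    rand_nums = np.random.randn(H, W, 3).astype(np.float32)   # one (H, W, 3) draw per frame
    # noise_multiplier scales both the random pixel shuffling and the depth distortion term.
    return simulate_redwood_depth_noise(gt_depth, stand_in_model, 1.0, rand_nums)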
class EgoMap:
r"""Estimates the top-down occupancy based on current depth-map.
Args:
sim: reference to the simulator for calculating task observations.
config: contains the MAP_SCALE, MAP_SIZE, HEIGHT_THRESH fields to
decide grid-size, extents of the projection, and the thresholds
for determining obstacles and explored space.
"""
def __init__(
self, map_size=31, map_scale=0.1, position=[0, 1.25, 0], depth_sensor_hfov=90,
height_thresh=(0.2, 1.5), depth_sensor_min_depth=0, depth_sensor_max_depth=10,
depth_sensor_width=128, depth_sensor_height=128, depth_sensor_normalize_depth=False,
):
        # depth sensor attributes
self.depth_sensor_normalize_depth = depth_sensor_normalize_depth
# Map statistics
self.map_size = map_size
self.map_scale = map_scale
# Agent height for pointcloud transformation
self.sensor_height = position[1]
# Compute intrinsic matrix
hfov = float(depth_sensor_hfov) * np.pi / 180
vfov = 2 * np.arctan((depth_sensor_height / depth_sensor_width) * np.tan(hfov / 2.0))
self.intrinsic_matrix = np.array([[1 / np.tan(hfov / 2.), 0., 0., 0.],
[0., 1 / np.tan(vfov / 2.), 0., 0.],
[0., 0., 1, 0],
[0., 0., 0, 1]])
self.inverse_intrinsic_matrix = np.linalg.inv(self.intrinsic_matrix)
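        # Worked example (added for clarity, not in the original code): with the default square
        # sensor (depth_sensor_width == depth_sensor_height == 128) and depth_sensor_hfov == 90,
        # hfov = pi/2 and vfov = 2 * arctan(1 * tan(pi/4)) = pi/2, so both focal terms
        # 1/tan(hfov/2) and 1/tan(vfov/2) equal 1.0 and the intrinsic matrix is the identity;
        # narrower fields of view make these diagonal entries larger.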
# Height thresholds for obstacles
self.height_thresh = height_thresh
# Depth processing
self.min_depth = float(depth_sensor_min_depth)
self.max_depth = float(depth_sensor_max_depth)
# Pre-compute a grid of locations for depth projection
W = depth_sensor_width
H = depth_sensor_height
self.proj_xs, self.proj_ys = np.meshgrid(
np.linspace(-1, 1, W),
np.linspace(1, -1, H)
)
def convert_to_pointcloud(self, depth):
"""
Inputs:
depth = (H, W, 1) numpy array
Returns:
xyz_camera = (N, 3) numpy array for (X, Y, Z) in egocentric world coordinates
"""
depth_float = depth.astype(np.float32)[..., 0]
# =========== Convert to camera coordinates ============
W = depth.shape[1]
xs = np.copy(self.proj_xs).reshape(-1)
ys = np.copy(self.proj_ys).reshape(-1)
depth_float = depth_float.reshape(-1)
# Filter out invalid depths
max_forward_range = self.map_size * self.map_scale
valid_depths = (depth_float != 0.0) & (depth_float <= max_forward_range)
xs = xs[valid_depths]
ys = ys[valid_depths]
depth_float = depth_float[valid_depths]
# Unproject
# negate depth as the camera looks along -Z
xys = np.vstack((xs * depth_float,
ys * depth_float,
-depth_float, np.ones(depth_float.shape)))
inv_K = self.inverse_intrinsic_matrix
xyz_camera = np.matmul(inv_K, xys).T # XYZ in the camera coordinate system
xyz_camera = xyz_camera[:, :3] / xyz_camera[:, 3][:, np.newaxis]
return xyz_camera
def safe_assign(self, im_map, x_idx, y_idx, value):
try:
im_map[x_idx, y_idx] = value
except IndexError:
valid_idx1 = np.logical_and(x_idx >= 0, x_idx < im_map.shape[0])
valid_idx2 = np.logical_and(y_idx >= 0, y_idx < im_map.shape[1])
valid_idx = np.logical_and(valid_idx1, valid_idx2)
im_map[x_idx[valid_idx], y_idx[valid_idx]] = value
def _get_depth_projection(self, sim_depth):
"""
Project pixels visible in depth-map to ground-plane
"""
if self.depth_sensor_normalize_depth:
depth = sim_depth * (self.max_depth - self.min_depth) + self.min_depth
else:
depth = sim_depth
XYZ_ego = self.convert_to_pointcloud(depth)
# Adding agent's height to the pointcloud
XYZ_ego[:, 1] += self.sensor_height
# Convert to grid coordinate system
V = self.map_size
Vby2 = V // 2
points = XYZ_ego
grid_x = (points[:, 0] / self.map_scale) + Vby2
grid_y = (points[:, 2] / self.map_scale) + V
# Filter out invalid points
valid_idx = (grid_x >= 0) & (grid_x <= V-1) & (grid_y >= 0) & (grid_y <= V-1)
points = points[valid_idx, :]
grid_x = grid_x[valid_idx].astype(int)
grid_y = grid_y[valid_idx].astype(int)
# Create empty maps for the two channels
obstacle_mat = np.zeros((self.map_size, self.map_size), np.uint8)
explore_mat = np.zeros((self.map_size, self.map_size), np.uint8)
# Compute obstacle locations
high_filter_idx = points[:, 1] < self.height_thresh[1]
low_filter_idx = points[:, 1] > self.height_thresh[0]
obstacle_idx = np.logical_and(low_filter_idx, high_filter_idx)
self.safe_assign(obstacle_mat, grid_y[obstacle_idx], grid_x[obstacle_idx], 1)
kernel = np.ones((3, 3), np.uint8)
obstacle_mat = cv2.dilate(obstacle_mat, kernel, iterations=1)
# Compute explored locations
explored_idx = high_filter_idx
self.safe_assign(explore_mat, grid_y[explored_idx], grid_x[explored_idx], 1)
kernel = np.ones((3, 3), np.uint8)
explore_mat = cv2.dilate(explore_mat, kernel, iterations=1)
# Smoothen the maps
kernel = np.ones((3, 3), np.uint8)
obstacle_mat = cv2.morphologyEx(obstacle_mat, cv2.MORPH_CLOSE, kernel)
explore_mat = cv2.morphologyEx(explore_mat, cv2.MORPH_CLOSE, kernel)
# Ensure all expanded regions in obstacle_mat are accounted for in explored_mat
explore_mat = np.logical_or(explore_mat, obstacle_mat)
return np.stack([obstacle_mat, explore_mat], axis=2)
def get_observation(
self, depth_img,
) -> object:
# convert to numpy array
sim_depth = np.expand_dims(asnumpy(depth_img), axis=-1)
ego_map_gt = self._get_depth_projection(sim_depth)
return ego_map_gt
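# Illustrative sketch (added for clarity, not part of the original file): a hedged example of
# exercising the EgoMap class above on its own. The synthetic depth frame and the constructor
# arguments (which simply repeat the class defaults) are assumptions; inside the simulator the
# map is instead built from each agent's rendered depth observation.
def _example_ego_map() -> np.ndarray:
    mapper = EgoMap(map_size=31, map_scale=0.1, depth_sensor_width=128, depth_sensor_height=128)
    fake_depth = np.full((128, 128), 1.5, dtype=np.float32)  # flat surface 1.5 m ahead
    # Returns a (31, 31, 2) stack: channel 0 marks obstacles, channel 1 marks explored space.
    return mapper.get_observation(fake_depth)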
class DummySimulatorMultiAgent:
def __init__(self, num_agents=2):
self.num_agents = num_agents
self.positions = [None] * num_agents
self.rotations = [None] * num_agents
self._sim_obs = None
self.position = None
self.rotation = None
def seed(self, seed):
pass
def set_agent_state(self, positions=[], rotations=[]):
for i in range(len(positions)):
self.positions[i] = np.array(positions[i], dtype=np.float32)
self.rotations[i] = rotations[i]
self.position = np.array(positions[0], dtype=np.float32)
self.rotation = rotations[0]
def get_agent_state(self):
class State:
def __init__(self, positions=[], rotations=[]):
self.positions = []
self.rotations = []
for i in range(len(positions)):
self.positions.append(positions[i])
self.rotations.append(rotations[i])
self.position = positions[0]
self.rotation = rotations[0]
return State(self.positions, self.rotations)
def set_sensor_observations(self, sim_obs):
self._sim_obs = sim_obs
def get_sensor_observations(self):
return self._sim_obs
def close(self):
pass
@registry.register_simulator()
class HabitatSimAudioEnabledMultiAgentActiveMapping(HabitatSim):
def action_space_shortest_path(self, source: AgentState, targets: List[AgentState], agent_id: int = 0) -> List[
ShortestPathPoint]:
pass
def __init__(self, config: Config) -> None:
"""Changes made to simulator wrapper over habitat-sim
This simulator allows two agents to have a conversation episode between them as per the Chat2Map task
Args:
config: configuration for initializing the simulator.
"""
super().__init__(config)
self.env_cfg = self.config.SIM_ENV
self.task_cfg = self.config.SIM_TASK
self.audio_cfg = self.config.AUDIO
self.passive_mapping_cfg = self.config.SIM_TRAINER
self.scene_dataset = self.config.SCENE_DATASET
self.rir_sampling_rate = self.audio_cfg.RIR_SAMPLING_RATE
self._max_valid_impulse_length = self.audio_cfg.MAX_VALID_IMPULSE_LENGTH_AFTER_REMOVING_LEADING_ZEROS
self.hop_length = self.audio_cfg.HOP_LENGTH
self.n_fft = self.audio_cfg.N_FFT
self.win_length = self.audio_cfg.WIN_LENGTH
self._anechoic_audio_slice_length = self.audio_cfg.ANECHOIC_AUDIO_SLICE_LENGTH
self._audio_wav_shape = self.task_cfg.CONTEXT_SELF_AUDIO_SENSOR.FEATURE_SHAPE
print(f"LOADING ANECHOIC AUDIO FOR train")
anechoic_audio_dir = self.audio_cfg.ANECHOIC_DIR
assert os.path.isdir(anechoic_audio_dir)
anechoic_audio_filenames = os.listdir(anechoic_audio_dir)
self._anechoic_filename_2_audioData = {}
for anechoic_audio_filename in anechoic_audio_filenames:
anechoic_audio_filePath = os.path.join(anechoic_audio_dir, anechoic_audio_filename)
assert os.path.isfile(anechoic_audio_filePath)
anechoic_audioSR, anechoic_audioData = wavfile.read(anechoic_audio_filePath)
assert anechoic_audioSR == self.rir_sampling_rate
assert anechoic_audio_filename.split(".")[0] not in self._anechoic_filename_2_audioData
self._anechoic_filename_2_audioData[anechoic_audio_filename.split(".")[0]] = anechoic_audioData
assert "CONTEXT_VIEW_POSE_SENSOR" in self.task_cfg.SENSORS
self._pose_feat_shape = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.FEATURE_SHAPE
self._add_truncated_gaussian_pose_noise = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.ADD_TRUNCATED_GAUSSIAN_NOISE
self._truncated_gaussian_pose_noise_cfg = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.TRUNCATED_GAUSSIAN_NOISE
# self._truncated_gaussian_pose_noise_random_multipliers = None
self._gaussian_pose_noise_multipliers = None
if self._add_truncated_gaussian_pose_noise:
assert os.path.isfile(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH)
with open(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH, "rb") as fi:
self._gaussian_pose_noise_multipliers = pickle.load(fi)
self.max_context_length = self.env_cfg.MAX_CONTEXT_LENGTH
self.visual_budget = self.env_cfg.VISUAL_BUDGET
self.max_query_length = self.env_cfg.MAX_QUERY_LENGTH
assert self.max_query_length == (self.config.ALL_AGENTS.NUM * self.max_context_length)
self.render_local_ego_occ_maps_from_depth_images = self.config.RENDER_LOCAL_EGO_OCC_MAPS_FROM_DEPTH_IMAGES
self.local_occMap_cfg = self.config.LOCAL_MAP
self.ego_mapper = None
self.redwood_depth_noise_dist_model = None
self.redwood_depth_noise_multiplier = None
if self.render_local_ego_occ_maps_from_depth_images:
self.ego_mapper = EgoMap(
map_size=self.local_occMap_cfg.SIZE,
map_scale=self.local_occMap_cfg.SCALE,
position=self.local_occMap_cfg.AGENT_POSITION,
depth_sensor_hfov=self.local_occMap_cfg.HFOV_DEPTH_IMG,
height_thresh=self.local_occMap_cfg.HEIGHT_THRESH,
depth_sensor_min_depth=self.local_occMap_cfg.MIN_DEPTH,
depth_sensor_max_depth=self.local_occMap_cfg.MAX_DEPTH,
depth_sensor_width=self.local_occMap_cfg.WIDTH_DEPTH_IMG,
depth_sensor_height=self.local_occMap_cfg.HEIGHT_DEPTH_IMG,
depth_sensor_normalize_depth=self.local_occMap_cfg.NORMALIZE_DEPTH_IMG,
)
if self.config.DEPTH_SENSOR.ADD_REDWOOD_NOISE:
"""src: https://github.com/facebookresearch/habitat-sim/blob/main/src_python/habitat_sim/sensors/noise_models/redwood_depth_noise_model.py"""
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = np.load(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = self.redwood_depth_noise_dist_model.reshape(80, 80, 5)
self.redwood_depth_noise_multiplier = self.config.DEPTH_SENSOR.REDWOOD_NOISE_MULTIPLIER
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH)
with open(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH, "rb") as fi:
self._redwood_depth_noise_rand_nums = pickle.load(fi)
self.stitch_top_down_maps = self.config.STITCH_TOP_DOWN_MAPS
self.rir_dir = self.audio_cfg.RIR_DIR
assert os.path.isdir(self.rir_dir)
self.num_agents = self.config.ALL_AGENTS.NUM
assert self.num_agents == 2
self.total_context_length = None
self.agent_utterance_allSwitches = None
self.lst_anechoicAudio_filenameNstartSamplingIdx = None
self.used_query_nodsNrots = None
self._current_context_rgb = None
self._current_context_ego_local_map = None
self._current_context_view_pose = None
self._current_context_view_rAz = None
self._previous_context_view_mask = None
self._current_context_selfAudio = None
self._current_context_otherAudio = None
self._current_context_otherAudio_pose = None
self._current_context_audio_mask = None
self._all_context_audio_mask = None
self._current_query_globCanMapEgoCrop_gt = None
self._current_query_globCanMapEgoCrop_gt_exploredPartMask = None
self._current_query_mask = None
self._all_query_mask = None
if self.stitch_top_down_maps:
self._current_stitched_query_globCanMapEgoCrop_gt = None
assert self.config.SCENE_DATASET in ["mp3d"],\
"SCENE_DATASET needs to be in ['mp3d']"
self._previous_receiver_position_indexs = [None] * self.num_agents
self._current_receiver_position_indexs = [None] * self.num_agents
self._previous_rotation_angles = [None] * self.num_agents
self._current_rotation_angles = [None] * self.num_agents
self._frame_cache = defaultdict(dict)
self._episode_count = 0
self._step_count = 0
self._view_count = self.num_agents
self._action = 1
self._is_episode_active = None
self._previous_step_collideds = [None] * self.num_agents
self._nodes_n_azimuths_lists = [None] * self.num_agents
self._position_to_index_mapping = dict() | self.points, self.graph = load_points_data(self.meta_dir, self.config.AUDIO.GRAPH_FILE, | 0 | 2023-12-06 01:20:37+00:00 | 4k |
noirbizarre/pdm-dockerize | src/pdm_dockerize/commands.py | [
{
"identifier": "ProjectEntrypoint",
"path": "src/pdm_dockerize/entrypoint.py",
"snippet": "class ProjectEntrypoint:\n project: Project\n hooks: HookManager\n\n @cached_property\n def settings(self) -> DockerizeSettings:\n return self.project.pyproject.settings.get(\"dockerize\", {})\n\n @cached_property\n def runner(self) -> TaskRunner:\n return TaskRunner(self.project, hooks=self.hooks)\n\n def select_scripts(self) -> list[str]:\n \"\"\"\n List all scripts eligible to docker entrypoint according filtering\n \"\"\"\n include = filters.parse(self.settings, \"include\")\n exclude = filters.parse(self.settings, \"exclude\")\n scripts = self.project.pyproject.settings.get(\"scripts\", {}).keys()\n scripts = [script for script in scripts if not script.startswith(\"_\")]\n included = [script for script in scripts if filters.match(script, include)]\n return [script for script in included if not filters.match(script, exclude)]\n\n def __str__(self) -> str:\n return self.as_script()\n\n def as_script(self) -> str:\n \"\"\"Render the `sh` entrypoint\"\"\"\n out = io.StringIO()\n\n out.write(\"#!/usr/bin/env sh\\n\\n\")\n out.write(\"set -eu\\n\\n\")\n out.write(\"dirname=$(dirname $0)\\n\")\n out.write('cmd=${1:-\"\"}\\n')\n out.write(\"[ $cmd ] && shift\\n\")\n out.write(\"cd $dirname > /dev/null\\n\")\n out.write(\"\\n\")\n out.write(self.export_env())\n out.write(\"\\n\")\n out.write(self.usage())\n out.write(\"\\n\")\n out.write(\"case $cmd in\\n\")\n\n for script in self.select_scripts():\n out.write(self.case(script))\n\n out.write(f\"{INDENT}*)\\n\")\n out.write(f\"{2 * INDENT}usage\\n\")\n out.write(f\"{2 * INDENT};;\\n\")\n out.write(\"esac\\n\")\n\n return out.getvalue()\n\n def export_env(self) -> str:\n \"\"\"Export the environment variables\"\"\"\n out = io.StringIO()\n path = [\"$(pwd)/bin\", \"$PATH\"]\n pythonpath = [\"$(pwd)/lib\"]\n if package_dir := self.get_package_dir():\n pythonpath.insert(0, package_dir)\n out.write(f\"export PYTHONPATH={':'.join(pythonpath)}\\n\")\n out.write(f\"export PATH={':'.join(path)}\\n\")\n return out.getvalue()\n\n def get_package_dir(self) -> str | None:\n \"\"\"An optional directory containing the project sources\"\"\"\n # TODO: find a better way to identify package-dir\n build_system = self.project.backend.build_system()\n if not build_system.get(\"build-backend\") == \"pdm.backend\":\n return None\n default = \"src\" if self.project.root.joinpath(\"src\").exists() else None\n pkgdir = self.project.pyproject.settings.get(\"build\", {}).get(\"package-dir\", default)\n return f\"$(pwd)/{pkgdir}\" if pkgdir else None\n\n def usage(self) -> str:\n \"\"\"Render the entrypoint usage/help\"\"\"\n out = io.StringIO()\n out.write(\"usage() {\\n\")\n out.write(f'{INDENT}echo \"Available commands\"\\n')\n out.write(f'{INDENT}echo \"==================\"\\n')\n\n for script in self.select_scripts():\n task = self.runner.get_task(script)\n if task is None:\n continue\n if task.kind == \"cmd\" and isinstance(task.args, list):\n description = \" \".join(task.args)\n else:\n description = task.short_description\n if \"\\n\" in description:\n description = f\"{description.splitlines()[0]}…\"\n out.write(f'{INDENT}echo \"{script}: {description}\"\\n')\n out.write(\"}\\n\")\n return out.getvalue()\n\n def case(self, script: str) -> str:\n \"\"\"Render a script case for a given task\"\"\"\n task = self.runner.get_task(script)\n out = io.StringIO()\n out.write(f\"{INDENT}{task.name})\\n\")\n\n if pre := self.runner.get_task(f\"pre_{script}\"):\n out.write(self.script_for(pre))\n\n out.write(self.script_for(task))\n\n if post := 
self.runner.get_task(f\"post_{script}\"):\n out.write(self.script_for(post))\n\n out.write(f\"{2 * INDENT};;\\n\")\n return out.getvalue()\n\n def script_for(self, task: Task, params: str | None = None) -> str:\n \"\"\"Render the script part for a single task\"\"\"\n out = io.StringIO()\n opts = exec_opts(self.runner.global_options, task.options)\n if (envfile := opts.get(\"env_file\")) and isinstance(envfile, str):\n out.write(self.source_env(envfile))\n\n for var, value in opts.get(\"env\", {}).items():\n out.write(f'{2 * INDENT}{var}=\"{value}\"\\n')\n\n if isinstance(envfile, dict) and (override := envfile.get(\"override\")):\n out.write(self.source_env(override))\n\n if task.kind == \"call\":\n out.write(self.call_script(task))\n elif task.kind == \"cmd\":\n out.write(self.cmd_script(task, params))\n elif task.kind == \"composite\":\n out.write(self.composite_script(task, params))\n else:\n out.write(self.shell_script(task, params))\n return out.getvalue()\n\n def source_env(self, envfile: str) -> str:\n out = io.StringIO()\n out.write(f\"{2 * INDENT}set -o allexport\\n\")\n out.write(f\"{2 * INDENT}[ -f {envfile} ] && . {envfile} \")\n out.write(f\"|| echo '{envfile} is ignored as it does not exist.'\\n\")\n out.write(f\"{2 * INDENT}set +o allexport\\n\")\n return out.getvalue()\n\n def cmd_script(self, task: Task, params: str | None = None) -> str:\n if isinstance(task.args, str):\n script, interpolated = self.interpolate(task.args)\n script = \" \".join(shlex.split(script, posix=False))\n else:\n script, interpolated = self.interpolate(shlex.join(task.args))\n if not (params or interpolated):\n params = '\"$@\"'\n if params:\n script += f\" {params}\"\n return f\"{2 * INDENT}{script}\\n\"\n\n def call_script(self, task: Task) -> str:\n if not (m := RE_CALL.match(task.args)):\n raise ValueError(\"Unparsable call task {tasks.name}: {tasks.args}\")\n pkg = m.group(\"pkg\")\n fn = m.group(\"fn\")\n args = m.group(\"args\") or \"\"\n return f'{2 * INDENT}python -c \"from {pkg} import {fn}; {fn}({args})\"\\n'\n\n def shell_script(self, task: Task, params: str | None = None) -> str:\n out = io.StringIO()\n args, interpolated = self.interpolate(task.args)\n lines = args.splitlines()\n for idx, line in enumerate(lines, 1):\n out.write(f\"{2 * INDENT}{line}\")\n if idx == len(lines):\n if params:\n out.write(f\" {params}\")\n if not interpolated:\n out.write(' \"$@\"')\n out.write(\"\\n\")\n return out.getvalue()\n\n def composite_script(self, task: Task, params: str | None = None) -> str:\n out = io.StringIO()\n cmds, interpolated = zip(*(self.interpolate(cmd) for cmd in task.args))\n if not params and not any(interpolated):\n params = '\"$@\"'\n for cmd in cmds:\n args = shlex.split(cmd, posix=False)\n if inline := self.runner.get_task(args[0]):\n args = args[1:]\n script = \" \".join(args)\n if params:\n script += f\" {params}\"\n out.write(self.script_for(inline, script))\n else:\n out.write(f\"{2 * INDENT}{' '.join(args)} {params or ''}\\n\")\n return out.getvalue()\n\n def interpolate(self, script: str) -> tuple[str, bool]:\n \"\"\"Interpolate the `{args:[defaults]} placeholder in a string\"\"\"\n\n def replace(m: re.Match[str]) -> str:\n if default := m.group(\"default\"):\n return f'\"${{@:-{default}}}\"'\n return '\"$@\"'\n\n interpolated, count = RE_ARGS_PLACEHOLDER.subn(replace, script)\n return interpolated, count > 0"
},
{
"identifier": "DockerizeSynchronizer",
"path": "src/pdm_dockerize/installer.py",
"snippet": "class DockerizeSynchronizer(Synchronizer):\n \"\"\"A `Synchronizer` using the `DockerizeInstallManager`\"\"\"\n\n def get_manager(self) -> InstallManager:\n return DockerizeInstallManager(self.environment, use_install_cache=self.use_install_cache)"
}
] | import argparse
import os
from pathlib import Path
from pdm.cli import actions
from pdm.cli.commands.base import BaseCommand
from pdm.cli.filters import GroupSelection
from pdm.cli.hooks import HookManager
from pdm.cli.options import Option, dry_run_option, groups_group, lockfile_option
from pdm.cli.utils import check_project_file
from pdm.environments import PythonLocalEnvironment
from pdm.project import Project
from .entrypoint import ProjectEntrypoint
from .installer import DockerizeSynchronizer | 2,629 | from __future__ import annotations
class DockerizeEnvironment(PythonLocalEnvironment):
"""An environment installaing into the dist/docker directory"""
def __init__(
self, project: Project, *, target: str | None = None, python: str | None = None
) -> None:
super().__init__(project, python=python)
self.target = Path(target) if target else None
@property
def packages_path(self) -> Path:
return self.target or self.project.root / "dist/docker"
class DockerizeCommand(BaseCommand):
"""Generate content for a Docker image"""
arguments = (
Option(
"target",
nargs="?",
help="The target into which the docker assets will be generated (default: dist/docker)",
),
*BaseCommand.arguments,
groups_group,
dry_run_option,
lockfile_option,
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
check_project_file(project)
actions.check_lockfile(project)
selection = GroupSelection.from_options(project, options)
hooks = HookManager(project)
env = DockerizeEnvironment(project, target=options.target)
requirements = []
selection.validate()
for group in selection:
requirements.extend(project.get_dependencies(group).values())
candidates = actions.resolve_candidates_from_lockfile(project, requirements)
synchronizer = DockerizeSynchronizer(
candidates,
env,
dry_run=options.dry_run,
clean=False,
no_editable=True,
reinstall=False,
only_keep=False,
install_self=False,
fail_fast=True,
use_install_cache=False,
)
synchronizer.synchronize()
entrypoint = env.packages_path / "entrypoint"
| from __future__ import annotations
class DockerizeEnvironment(PythonLocalEnvironment):
"""An environment installaing into the dist/docker directory"""
def __init__(
self, project: Project, *, target: str | None = None, python: str | None = None
) -> None:
super().__init__(project, python=python)
self.target = Path(target) if target else None
@property
def packages_path(self) -> Path:
return self.target or self.project.root / "dist/docker"
class DockerizeCommand(BaseCommand):
"""Generate content for a Docker image"""
arguments = (
Option(
"target",
nargs="?",
help="The target into which the docker assets will be generated (default: dist/docker)",
),
*BaseCommand.arguments,
groups_group,
dry_run_option,
lockfile_option,
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
check_project_file(project)
actions.check_lockfile(project)
selection = GroupSelection.from_options(project, options)
hooks = HookManager(project)
env = DockerizeEnvironment(project, target=options.target)
requirements = []
selection.validate()
for group in selection:
requirements.extend(project.get_dependencies(group).values())
candidates = actions.resolve_candidates_from_lockfile(project, requirements)
synchronizer = DockerizeSynchronizer(
candidates,
env,
dry_run=options.dry_run,
clean=False,
no_editable=True,
reinstall=False,
only_keep=False,
install_self=False,
fail_fast=True,
use_install_cache=False,
)
synchronizer.synchronize()
entrypoint = env.packages_path / "entrypoint" | entrypoint.write_text(ProjectEntrypoint(project, hooks).as_script()) | 0 | 2023-12-13 23:35:23+00:00 | 4k |
wrongbad/badcad | badcad/badcad.py | [
{
"identifier": "display",
"path": "badcad/utils.py",
"snippet": "def display(thing, \n vscode_fix=True, \n wireframe=False, \n color='#aaaa22', \n smoothing_threshold=-1,\n width=640,\n height=640,\n ):\n if vscode_fix:\n fix_vscode_style()\n \n if isinstance(thing, (tuple, list)):\n verts, tris = thing\n elif hasattr(thing, 'to_mesh'):\n m = thing.to_mesh()\n verts = m.vert_properties[...,:3].astype(np.float32)\n tris = m.tri_verts.astype(np.uint32)\n else:\n raise ValueError(f'unsupported thing: {type(thing)}')\n\n box0 = np.min(verts, axis=0)\n box1 = np.max(verts, axis=0)\n\n sz = np.linalg.norm(box1-box0)\n mid = (box0+box1)/2\n\n verts = verts - mid\n tnormals = triangle_normals(verts, tris)\n vnormals = smooth_normals(tris, tnormals, smoothing_threshold)\n verts = verts[tris]\n index = np.arange(tris.size, dtype=np.uint32)\n\n geometry = pythreejs.BufferGeometry(\n attributes = dict(\n position = pythreejs.BufferAttribute(verts),\n normal = pythreejs.BufferAttribute(vnormals),\n ),\n index = pythreejs.BufferAttribute(index)\n )\n\n material = pythreejs.MeshPhysicalMaterial(\n color = color,\n reflectivity = 0.2,\n clearCoat = 0.6,\n clearCoatRoughness = 0.7,\n wireframe = wireframe,\n );\n\n threemesh = pythreejs.Mesh(geometry, material)\n\n lights = [\n pythreejs.DirectionalLight(\n color='white', \n position=l[:3],\n intensity=l[3],\n )\n for l in [\n (-40, 5, 40, 0.5), \n (0, 0, 40, 0.2), \n (20, 5, -20, 0.1), \n ]\n ]\n\n camera = pythreejs.PerspectiveCamera(\n position=[0, 0, sz*1.3], \n up=[0, 1, 0], \n children=lights,\n )\n\n controls = pythreejs.OrbitControls(\n controlling=camera, \n rotateSpeed=1.0, \n zoomSpeed=0.5,\n enableZoom=False, # avoid notbook scroll conflict\n )\n\n scene = pythreejs.Scene(\n children=[\n threemesh,\n camera, \n pythreejs.AmbientLight(color='#aaf')\n ], \n background=None,\n )\n\n return pythreejs.Renderer(\n camera=camera,\n scene=scene,\n alpha=True,\n clearOpacity=0.2,\n controls=[controls],\n width=width, \n height=height,\n )"
},
{
"identifier": "triangle_normals",
"path": "badcad/utils.py",
"snippet": "def triangle_normals(verts, tris):\n a = verts[tris[:,1]] - verts[tris[:,0]]\n b = verts[tris[:,2]] - verts[tris[:,1]]\n tnormals = np.cross(a, b)\n tnormals /= np.linalg.norm(tnormals, axis=-1, keepdims=True)\n return tnormals"
},
{
"identifier": "polygon_nearest_alignment",
"path": "badcad/utils.py",
"snippet": "def polygon_nearest_alignment(va, vb):\n dist = lambda x: np.sum(x ** 2, axis=-1)\n j0 = np.argmin(dist(vb - va[0]))\n i, j = 0, j0\n na, nb = len(va), len(vb)\n out = []\n while True:\n ip1, jp1 = (i+1)%na, (j+1)%nb\n d0 = dist(va[ip1] - vb[j])\n d1 = dist(va[i] - vb[jp1])\n if d0 < d1:\n out += [[ip1, j]]\n i = ip1\n else:\n out += [[i, jp1]]\n j = jp1\n if (i,j) == (0, j0):\n break\n return out"
},
{
"identifier": "svg2polygons",
"path": "badcad/utils.py",
"snippet": "def svg2polygons(svg, fn=8):\n import svgelements\n # this lib handles transforms and `use` tags\n svg = svgelements.SVG.parse(BytesIO(svg))\n polys = []\n for e in svg.elements():\n if isinstance(e, svgelements.Path):\n # TODO policy for unclosed paths\n p = PolyPath(fn=fn)\n for s in e.segments():\n if isinstance(s, svgelements.Move):\n p.move(s.end)\n elif isinstance(s, svgelements.Line):\n p.line(s.end)\n elif isinstance(s, svgelements.QuadraticBezier):\n p.bez([s.control1, s.end])\n elif isinstance(s, svgelements.CubicBezier):\n p.bez([s.control1, s.control2, s.end])\n elif isinstance(s, svgelements.Close):\n p.close()\n else:\n raise ValueError(f'unsupported segment: {type(s)}')\n polys += p.polys\n return polys"
},
{
"identifier": "text2svg",
"path": "badcad/utils.py",
"snippet": "def text2svg(text, size=10, font=\"Helvetica\"):\n import cairo\n memfile = BytesIO()\n with cairo.SVGSurface(memfile, size, size) as surface:\n ctx = cairo.Context(surface)\n ctx.set_font_size(size)\n ctx.select_font_face(font,\n cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_NORMAL)\n ctx.show_text(text)\n return memfile.getvalue()"
},
{
"identifier": "PolyPath",
"path": "badcad/utils.py",
"snippet": "class PolyPath:\n def __init__(self, fn=32):\n self.polys = []\n self.poly = []\n self.pos = (0,0)\n self.fn = fn\n\n def move(self, p):\n self.pos = p\n return self\n \n def line(self, p):\n if len(self.poly) == 0:\n self.poly += [self.pos]\n self.poly += [p]\n self.pos = p\n return self\n\n def bez(self, pts, fn=0):\n if len(self.poly) == 0:\n self.poly += [self.pos]\n fn = fn or self.fn\n vs = [p[0]+p[1]*1j for p in [self.pos, *pts]]\n for i in range(1, fn):\n n = len(vs) - 1\n t = i / fn\n u = 1 - t\n c = u ** n\n v = 0\n for j in range(len(vs)):\n v += c * vs[j]\n c *= t * (n-j) / (u * (1+j))\n self.poly += [(v.real, v.imag)]\n self.poly += [pts[-1]]\n self.pos = pts[-1]\n return self\n\n def close(self):\n self.polys += [self.poly]\n self.poly = []"
}
] | import manifold3d
import numpy as np
from manifold3d import Manifold, CrossSection
from .utils import (
display,
triangle_normals,
polygon_nearest_alignment,
svg2polygons,
text2svg,
PolyPath
) | 1,921 |
# wrapper for Manifold
# adds jupyter preview & tweaks API
class Solid:
def __init__(self, manifold = Manifold()):
self.manifold = manifold
# TODO add visual properties (e.g. color, texture)
def _repr_mimebundle_(self, **kwargs):
if self.is_empty():
return None
raw_mesh = self.to_mesh()
verts = raw_mesh.vert_properties.astype(np.float32)
tris = raw_mesh.tri_verts.astype(np.uint32)
|
# wrapper for Manifold
# adds jupyter preview & tweaks API
class Solid:
def __init__(self, manifold = Manifold()):
self.manifold = manifold
# TODO add visual properties (e.g. color, texture)
def _repr_mimebundle_(self, **kwargs):
if self.is_empty():
return None
raw_mesh = self.to_mesh()
verts = raw_mesh.vert_properties.astype(np.float32)
tris = raw_mesh.tri_verts.astype(np.uint32) | renderer = display((verts, tris)) | 0 | 2023-12-11 01:48:22+00:00 | 4k |
Kokonico/ObjLog | tests/test_tests.py | [
{
"identifier": "LogMessage",
"path": "objlog/Base/LogMessage.py",
"snippet": "class LogMessage:\n \"\"\"a base message to be logged\n Attributes:\n color\n level (name)\n\n WARNING: this class should not be used directly, use a subclass instead\n it is designed to be used as a base class for other classes, and will not work properly if used directly.\n \"\"\"\n\n def __init__(self, message):\n self.message = str(message)\n self.timestamp = datetime.now()\n self.unix_timestamp = time_ns() // 1_000_000 # deprecated, use self.unix instead\n self.unix = time_ns() // 1_000_000\n # create uuid\n self.uuid = f\"{time_ns()}-{random.randint(0, 1000)}\"\n try:\n t1 = self.color\n t2 = self.level\n except AttributeError:\n raise TypeError(\"this class should not be used directly, use a subclass instead\")\n\n def __str__(self):\n return f\"[{self.timestamp}] {self.level}: {self.message}\"\n\n def __repr__(self):\n return f\"{self.level}: {self.message}\"\n\n def __eq__(self, other):\n return self.uuid == other.uuid\n\n def __ne__(self, other):\n return self.uuid != other.uuid\n\n def colored(self) -> str:\n \"\"\"return a colored version of the message\"\"\"\n return f\"{self.color}[{self.timestamp}] {self.level}: {self.message}\\033[0m\""
},
{
"identifier": "LogNode",
"path": "objlog/Base/LogNode.py",
"snippet": "class LogNode:\n \"\"\"A LogNode, the main class of the ObjLogger. It can log messages to a file, to the console, or both.\"\"\"\n\n open = open # this code is probably the reason why my dad left me\n\n # this is clearly not a good way to do this, but I don't know how to do it better\n\n # if anyone can prevent doing this, and fix the exception caused when deleting a LogNode, please do it\n\n # else please increment this number by 1\n # thank you\n\n # total_failed_attempts_to_fix_this = 1\n\n def __init__(self, name: str, log_file: str | None = None, print_to_console: bool = False,\n print_filter: list | None = None, max_messages_in_memory: int = 500, max_log_messages: int = 1000,\n log_when_closed: bool = True, wipe_log_file_on_init: bool = False):\n self.log_file = log_file\n self.name = name\n self.print = print_to_console\n self.messages = deque(maxlen=max_messages_in_memory)\n self.max = max_messages_in_memory\n self.maxinf = max_log_messages\n self.print_filter = print_filter\n self.log_closure_message = log_when_closed\n self.log_len = 0\n\n # check if log exists (in file system), and if so, clear it\n if isinstance(log_file, str) and wipe_log_file_on_init:\n with open(log_file, \"w+\") as f:\n f.write(\"\")\n\n def log(self, message, override_log_file: str | None = None, force_print: tuple[bool, bool] = (False, False),\n preserve_message_in_memory: bool = True) -> None:\n \"\"\"log a message\"\"\"\n # make sure it's a LogMessage or its subclass\n if not isinstance(message, LogMessage):\n raise TypeError(\"message must be a LogMessage or its subclass\")\n if preserve_message_in_memory:\n self.messages.append(message)\n\n if isinstance(self.log_file, str) or isinstance(override_log_file, str):\n message_str = f\"[{self.name}] {str(message)}\"\n\n # log it\n with open(self.log_file, \"a+\") as f:\n # move the file pointer to the beginning of the file\n f.seek(0)\n\n # check if the amount of messages in the file is bigger than/equal to the max\n if self.log_len > self.maxinf:\n # if so, crop the file's oldest messages recursively until it's smaller than (or equal to) the max\n lines = f.readlines()\n lines = lines[-self.maxinf + 1:] # scuffed code, do not touch\n with open(self.log_file, \"w\") as f2:\n f2.writelines(lines)\n self.log_len = len(lines)\n\n # write the message\n f.write(message_str + '\\n')\n self.log_len += 1\n\n if (self.print or force_print[0]) and (\n self.print_filter is None or isinstance(message, tuple(self.print_filter))):\n if force_print[1] and force_print[0]:\n print(f\"[{self.name}] {message.colored()}\")\n elif force_print[0] is False and self.print:\n print(f\"[{self.name}] {message.colored()}\")\n\n def set_output_file(self, file: str | None, preserve_old_messages: bool = False) -> None:\n \"\"\"set log output file.\"\"\"\n if self.log_file == file:\n return # if the file is the same, do nothing\n\n self.log_file = file\n if preserve_old_messages and isinstance(file, str):\n for i in self.messages:\n self.log(i, preserve_message_in_memory=False, override_log_file=file, force_print=(True, False))\n\n def dump_messages(self, file: str, elementfilter: list | None = None,\n wipe_messages_from_memory: bool = False) -> None:\n \"\"\"dump all logged messages to a file, also filtering them if needed\"\"\"\n if elementfilter is not None:\n with open(file, \"a\") as f:\n for i in self.messages:\n if isinstance(i, tuple(elementfilter)):\n f.write(str(i) + '\\n')\n else:\n with open(file, \"a\") as f:\n f.write('\\n'.join(map(str, self.messages)))\n if 
wipe_messages_from_memory:\n self.wipe_messages()\n\n def filter(self, typefilter: list, filter_logfiles: bool = False) -> None:\n \"\"\"filter messages saved in memory, optionally the logfiles too\"\"\"\n self.messages = list(filter(lambda x: isinstance(x, tuple(typefilter)), self.messages))\n if filter_logfiles:\n if isinstance(self.log_file, str):\n with open(self.log_file, \"w\") as f:\n for i in self.messages:\n f.write(str(i) + '\\n')\n\n def dump_messages_to_console(self, elementfilter: list | None = None) -> None:\n \"\"\"dump all logged messages to the console, also filtering them if needed\"\"\"\n for i in self.messages:\n if elementfilter is None or (elementfilter is not None and isinstance(i, tuple(elementfilter))):\n self.log(i, force_print=(True, True), preserve_message_in_memory=False)\n\n def wipe_messages(self, wipe_logfiles: bool = False) -> None:\n \"\"\"wipe all messages from memory, can free up a lot of memory if you have a lot of messages,\n but you won't be able to dump the previous messages to a file\"\"\"\n self.messages = []\n if wipe_logfiles:\n self.clear_log()\n\n def clear_log(self) -> None:\n \"\"\"clear the log file\"\"\"\n if isinstance(self.log_file, str):\n with open(self.log_file, \"w\") as f:\n f.write(\"\")\n self.log_len = 0\n\n def set_max_messages_in_memory(self, max_messages: int) -> None:\n \"\"\"set the maximum amount of messages to be saved in memory\"\"\"\n self.max = max_messages\n self.messages = deque(self.messages, maxlen=self.max)\n\n def set_max_messages_in_log(self, max_file_size: int) -> None:\n \"\"\"set the maximum message limit of the log file\"\"\"\n self.maxinf = max_file_size\n # crop the file if it's too big\n if isinstance(self.log_file, str):\n with open(self.log_file, \"r+\") as f:\n if self.log_len >= self.maxinf:\n lines = f.readlines()\n lines = lines[-self.maxinf:]\n f.seek(0)\n f.truncate()\n f.writelines(lines)\n self.log_len = len(lines)\n\n def get(self, element_filter: list | None) -> list:\n \"\"\"get all messages saved in memory, optionally filtered\"\"\"\n if element_filter is None:\n return list(self.messages)\n else:\n return list(filter(lambda x: isinstance(x, tuple(element_filter)), self.messages))\n\n def combine(self, other: 'LogNode', merge_log_files: bool = True) -> None:\n \"\"\"combine two LogNodes.\"\"\"\n self.messages.extend(other.messages)\n\n if merge_log_files:\n self.clear_log()\n with open(self.log_file, \"w\") as f:\n for i in self.messages:\n f.write(str(i) + '\\n')\n\n def squash(self, message: LogMessage) -> None:\n \"\"\"squash the lognode, i.e. replace all messages with a single message\"\"\"\n self.messages.clear()\n self.messages.append(message)\n\n def __repr__(self):\n return f\"LogNode {self.name} at output {self.log_file}\" if isinstance(self.log_file, str) else \\\n f\"LogNode {self.name} at output console\" if self.print else f\"LogNode {self.name} at output None\"\n\n def __len__(self):\n return len(self.messages)\n\n def __contains__(self, item: LogMessage):\n return item in self.messages\n\n def __del__(self):\n # log the deletion\n if self.log_closure_message:\n self.log(Debug(\"LogNode closed.\"))\n # python will delete self automatically (thanks python)"
},
{
"identifier": "Debug",
"path": "objlog/LogMessages.py",
"snippet": "class Debug(LogMessage):\n \"\"\"the default debug message, with blue color\"\"\"\n level = \"DEBUG\"\n color = \"\\033[94m\""
},
{
"identifier": "Info",
"path": "objlog/LogMessages.py",
"snippet": "class Info(LogMessage):\n \"\"\"the default info message, with green color\"\"\"\n level = \"INFO\"\n color = \"\\033[92m\""
},
{
"identifier": "Warn",
"path": "objlog/LogMessages.py",
"snippet": "class Warn(LogMessage):\n \"\"\"the default warn message, with yellow color\"\"\"\n level = \"WARN\"\n color = \"\\033[93m\""
},
{
"identifier": "Error",
"path": "objlog/LogMessages.py",
"snippet": "class Error(LogMessage):\n \"\"\"the default error message, with red color\"\"\"\n level = \"ERROR\"\n color = \"\\033[91m\""
},
{
"identifier": "Fatal",
"path": "objlog/LogMessages.py",
"snippet": "class Fatal(LogMessage):\n \"\"\"the default fatal message, with pink color\"\"\"\n level = \"FATAL\"\n color = \"\\033[95m\""
}
] | import unittest
import random
import os
from objlog import LogNode, LogMessage
from objlog.LogMessages import Debug, Info, Warn, Error, Fatal | 2,650 | """test the functionality of the logger"""
def gen_random_messages(amount: int, extra_classes: list | None = None):
"""generate random messages"""
messages = []
if extra_classes is None:
extra_classes = []
for i in range(amount):
| """test the functionality of the logger"""
def gen_random_messages(amount: int, extra_classes: list | None = None):
"""generate random messages"""
messages = []
if extra_classes is None:
extra_classes = []
for i in range(amount): | messages.append(random.choice([Debug, Info, Warn, Error, Fatal] + extra_classes)("This is a random message")) | 3 | 2023-12-08 20:41:18+00:00 | 4k |
anyquest/pyaq | aq/activities/generate.py | [
{
"identifier": "BaseActivity",
"path": "aq/activities/activity.py",
"snippet": "class BaseActivity:\n MAX_ITERATIONS = 42\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n pass\n\n @staticmethod\n def merge_inputs(inputs: Dict[str, Any]) -> str:\n values = []\n for key, val in inputs.items():\n if isinstance(val, list):\n values.append(\"\\n\".join(val))\n else:\n values.append(val)\n return \"\\n\\n\".join(values)\n\n @staticmethod\n def merge_inputs_json(inputs: Dict[str, Any], indent=2) -> str:\n rval = {}\n for key, val in inputs.items():\n try:\n rval[key] = [json.loads(elem) for elem in val] if isinstance(val, list) else json.loads(val)\n except json.JSONDecodeError:\n rval[key] = val\n if len(rval.keys()) == 1:\n return json.dumps(next(iter(rval.values())), indent=indent)\n else:\n return json.dumps(rval, indent=indent)\n\n @staticmethod\n def generate_temp_filename(prefix, extension, length=8):\n random_string = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(length))\n return f\"{prefix}_{random_string}.{extension}\"\n\n @staticmethod\n def render(template: str, inputs: Dict[str, Any]) -> str:\n # Call a function to process path expressions\n str_template = template\n expr = r'{{([^.\\[]+)([.\\[])(.*)}}'\n for match in re.finditer(expr, template):\n activity_name = match.group(1)\n start_of_expression = match.group(2)\n path_expression = match.group(3)\n str_template = str_template.replace(match.group(0),\n '{{' +\n f'jsonpath({activity_name}, \"${start_of_expression}{path_expression}\")'\n + '}}',\n 1)\n\n # Render the template\n jinja_template = Template(str_template)\n jinja_template.globals.update({\n \"jsonpath\": jsonpath\n })\n return jinja_template.render(inputs)\n\n @staticmethod\n def render_prompt(template: str, inputs: Dict[str, Any]) -> str | List[Content]:\n text = BaseActivity.render(template, inputs)\n\n contents = []\n for key in inputs:\n if isinstance(inputs[key], str) and inputs[key].startswith(\"data:image\"):\n contents.append(Content(type=\"image_url\", image_url=inputs[key]))\n\n if len(contents) > 0:\n contents.insert(0, Content(type=\"text\", text=text))\n return contents\n else:\n return text"
},
{
"identifier": "ActivityError",
"path": "aq/activities/activity.py",
"snippet": "class ActivityError(Exception):\n pass"
},
{
"identifier": "ProviderManager",
"path": "aq/providers/manager.py",
"snippet": "class ProviderManager:\n def __init__(self, openai_provider: OpenAIProvider,\n azure_provider: AzureProvider,\n anthropic_provider: AnthropicProvider,\n llava_provider: LlavaProvider,\n gemini_provider: GeminiProvider):\n self._providers = {\n ModelProvider.OPENAI: openai_provider,\n ModelProvider.AZURE: azure_provider,\n ModelProvider.GEMINI: gemini_provider,\n ModelProvider.ANTHROPIC: anthropic_provider,\n ModelProvider.LLAVA: llava_provider\n }\n self._logger = logging.getLogger(self.__class__.__name__)\n\n def get_provider(self, provider_type: ModelProvider) -> BaseProvider:\n return self._providers[provider_type]"
},
{
"identifier": "ChatCompletionMessage",
"path": "aq/providers/types/chat.py",
"snippet": "class ChatCompletionMessage(BaseModel):\n role: str\n content: Optional[str | List[Content]] = \"\"\n name: Optional[str] = None\n tool_call_id: Optional[str] = None\n tool_calls: Optional[List[ToolCall]] = None"
},
{
"identifier": "ChatCompletionRequest",
"path": "aq/providers/types/chat.py",
"snippet": "class ChatCompletionRequest(BaseModel):\n model: str\n messages: List[ChatCompletionMessage]\n tools: Optional[List[Tool]] = None\n response_format: Optional[ResponseFormat] = None\n tool_choice: Optional[str] = None\n temperature: float = 0.5\n presence_penalty: float = 0.0\n frequency_penalty: float = 0.0\n max_tokens: int = 1000"
},
{
"identifier": "Choice",
"path": "aq/providers/types/chat.py",
"snippet": "class Choice(BaseModel):\n index: int\n message: ChatCompletionMessage\n finish_reason: Optional[str] = None"
},
{
"identifier": "Tool",
"path": "aq/providers/types/chat.py",
"snippet": "class Tool(BaseModel):\n type: Literal[\"function\"] = \"function\"\n function: Function"
},
{
"identifier": "ResponseFormat",
"path": "aq/providers/types/chat.py",
"snippet": "class ResponseFormat(BaseModel):\n type: Literal[\"json_object\"] = \"json_object\""
},
{
"identifier": "ToolCall",
"path": "aq/providers/types/chat.py",
"snippet": "class ToolCall(BaseModel):\n id: str\n type: str\n function: FunctionCall"
},
{
"identifier": "ToolManager",
"path": "aq/tools/manager.py",
"snippet": "class ToolManager:\n def __init__(self, web_tool: WebTool, rest_tool: RestTool):\n self._tools = {\n ToolType.WEB: web_tool,\n ToolType.REST: rest_tool\n }\n\n def get_tool(self, tool_type: ToolType) -> BaseTool:\n return self._tools.get(tool_type, None)"
},
{
"identifier": "App",
"path": "aq/types/app.py",
"snippet": "class App(BaseModel):\n aq: str\n info: AppInfo\n models: Optional[Dict[str, Model]] = None\n memory: Optional[Dict[str, MemoryDef]] = None\n tools: Optional[Dict[str, ToolDef]] = None\n activities: Dict[str, Activity]"
},
{
"identifier": "Activity",
"path": "aq/types/app.py",
"snippet": "class Activity(BaseModel):\n type: ActivityType\n tools: Optional[List[str]] = None\n inputs: Optional[List[ActivityInput]] = None\n models: Optional[List[str]] = None\n memory: Optional[List[str]] = None\n parameters: Dict[str, Any] = {}"
},
{
"identifier": "ActivityJob",
"path": "aq/types/job.py",
"snippet": "class ActivityJob:\n id: str\n activity_name: str\n app_job: AppJob\n state: JobState\n output: str\n output_type: str = \"text/plain\"\n\n def __init__(self, activity_name: str, app_job: AppJob):\n self.id = str(uuid.uuid4())\n self.activity_name = activity_name\n self.app_job = app_job\n self.state = JobState.CREATED\n self.output = \"\"\n\n @property\n def finished(self) -> bool:\n return self.state == JobState.SUCCESS or self.state == JobState.ERROR"
},
{
"identifier": "JobState",
"path": "aq/types/job.py",
"snippet": "class JobState(Enum):\n ANY = 0,\n CREATED = 1,\n RUNNING = 2,\n SUCCESS = 3\n ERROR = 4"
}
] | import json
import logging
import time
from typing import Dict, Any, List
from .activity import BaseActivity, ActivityError
from ..providers import ProviderManager
from ..providers.types import ChatCompletionMessage, ChatCompletionRequest, Choice, Tool, ResponseFormat, ToolCall
from ..tools import ToolManager
from ..types import ActivityJob, JobState, Activity, App | 2,405 |
class GenerateActivity(BaseActivity):
TOOL_NAME_DELIMITER = "__"
def __init__(self, provider_manager: ProviderManager, tool_manager: ToolManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._provider_manager = provider_manager
self._tool_manager = tool_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
if len(activity.models) < 1:
raise ActivityError(f"A model is required")
model = app.models[activity.models[0]]
temperature = float(activity.parameters.get("temperature", model.parameters.get("temperature", 0.5)))
max_tokens = int(activity.parameters.get("max_words", model.parameters.get("max_words", 500))*4/3)
messages = []
profile = app.info.profile
if profile:
messages.append(ChatCompletionMessage(role="system", content=profile))
json_format = activity.parameters.get("format", None) == "json"
if json_format:
messages.append(ChatCompletionMessage(
role="system",
content="Provide your response as a JSON object."))
else:
messages.append(ChatCompletionMessage(
role="system",
content="Use the tab length of two spaces when formatting nested lists in markdown."))
tools = await self.get_tools(app, activity)
if tools:
messages.append(ChatCompletionMessage(
role="system",
content="Think step-by-step. Perform as many iterations as necessary "
"to accomplish your goal using the tools provided."))
prompt_template = activity.parameters["prompt"]
prompt = self.render_prompt(prompt_template, inputs)
messages.append(ChatCompletionMessage(role="user", content=prompt))
parts = []
start_time = time.perf_counter()
provider = self._provider_manager.get_provider(model.provider)
for x in range(self.MAX_ITERATIONS):
request = ChatCompletionRequest(
model=model.model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
tools=tools if tools else None,
tool_choice="auto" if tools else None,
response_format=ResponseFormat(type="json_object") if json_format else None
)
response = await provider.create_completion(request)
choice: Choice = response.choices[0]
message: ChatCompletionMessage = choice.message
messages.append(message)
if choice.finish_reason == "tool_calls":
for tool_call in message.tool_calls:
tool_result = await self.process_tool_call(tool_call, app)
messages.append(tool_result)
else:
if message.content:
parts.append(message.content)
if choice.finish_reason:
self._logger.debug(f"Finished with reason {choice.finish_reason} "
f"in {int(time.perf_counter()-start_time)} sec.")
break
activity_job.state = JobState.SUCCESS
activity_job.output = "\n\n".join(parts)
activity_job.output_type = "text/markdown"
except Exception as e:
self._logger.error(e)
activity_job.state = JobState.ERROR
activity_job.output = str(e)
|
class GenerateActivity(BaseActivity):
TOOL_NAME_DELIMITER = "__"
def __init__(self, provider_manager: ProviderManager, tool_manager: ToolManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._provider_manager = provider_manager
self._tool_manager = tool_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
if len(activity.models) < 1:
raise ActivityError(f"A model is required")
model = app.models[activity.models[0]]
temperature = float(activity.parameters.get("temperature", model.parameters.get("temperature", 0.5)))
max_tokens = int(activity.parameters.get("max_words", model.parameters.get("max_words", 500))*4/3)
messages = []
profile = app.info.profile
if profile:
messages.append(ChatCompletionMessage(role="system", content=profile))
json_format = activity.parameters.get("format", None) == "json"
if json_format:
messages.append(ChatCompletionMessage(
role="system",
content="Provide your response as a JSON object."))
else:
messages.append(ChatCompletionMessage(
role="system",
content="Use the tab length of two spaces when formatting nested lists in markdown."))
tools = await self.get_tools(app, activity)
if tools:
messages.append(ChatCompletionMessage(
role="system",
content="Think step-by-step. Perform as many iterations as necessary "
"to accomplish your goal using the tools provided."))
prompt_template = activity.parameters["prompt"]
prompt = self.render_prompt(prompt_template, inputs)
messages.append(ChatCompletionMessage(role="user", content=prompt))
parts = []
start_time = time.perf_counter()
provider = self._provider_manager.get_provider(model.provider)
for x in range(self.MAX_ITERATIONS):
request = ChatCompletionRequest(
model=model.model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
tools=tools if tools else None,
tool_choice="auto" if tools else None,
response_format=ResponseFormat(type="json_object") if json_format else None
)
response = await provider.create_completion(request)
choice: Choice = response.choices[0]
message: ChatCompletionMessage = choice.message
messages.append(message)
if choice.finish_reason == "tool_calls":
for tool_call in message.tool_calls:
tool_result = await self.process_tool_call(tool_call, app)
messages.append(tool_result)
else:
if message.content:
parts.append(message.content)
if choice.finish_reason:
self._logger.debug(f"Finished with reason {choice.finish_reason} "
f"in {int(time.perf_counter()-start_time)} sec.")
break
activity_job.state = JobState.SUCCESS
activity_job.output = "\n\n".join(parts)
activity_job.output_type = "text/markdown"
except Exception as e:
self._logger.error(e)
activity_job.state = JobState.ERROR
activity_job.output = str(e)
| async def get_tools(self, app: App, activity: Activity) -> List[Tool]: | 6 | 2023-12-14 13:25:52+00:00 | 4k |
multimodallearning/DG-TTA | dg_tta/tta/augmentation_utils.py | [
{
"identifier": "MIND3D",
"path": "dg_tta/mind.py",
"snippet": "class MIND3D(torch.nn.Module):\n def __init__(self, delta=1, sigma=1, randn_weighting=0.05) -> None:\n super().__init__()\n self.delta = delta\n self.sigma = sigma\n self.out_channels = 12\n # define start and end locations for self-similarity pattern\n six_neighbourhood = torch.tensor(\n [[0, 1, 1], [1, 1, 0], [1, 0, 1], [1, 1, 2], [2, 1, 1], [1, 2, 1]],\n dtype=torch.float,\n )\n\n # squared distances\n dist = pdist(six_neighbourhood.unsqueeze(0)).squeeze(0)\n\n # define comparison mask\n x, y = torch.meshgrid(torch.arange(6), torch.arange(6), indexing=\"ij\")\n mask = (x > y).view(-1) & (dist == 2).view(-1)\n\n # build kernel\n idx_shift1 = (\n six_neighbourhood.unsqueeze(1).repeat(1, 6, 1).view(-1, 3)[mask, :].long()\n )\n idx_shift2 = (\n six_neighbourhood.unsqueeze(0).repeat(6, 1, 1).view(-1, 3)[mask, :].long()\n )\n mshift1 = torch.zeros((12, 1, 3, 3, 3))\n mshift1.view(-1)[\n torch.arange(12) * 27\n + idx_shift1[:, 0] * 9\n + idx_shift1[:, 1] * 3\n + idx_shift1[:, 2]\n ] = 1\n mshift2 = torch.zeros((12, 1, 3, 3, 3))\n mshift2.view(-1)[\n torch.arange(12) * 27\n + idx_shift2[:, 0] * 9\n + idx_shift2[:, 1] * 3\n + idx_shift2[:, 2]\n ] = 1\n self.rpad = torch.nn.ReplicationPad3d(delta)\n self.mshift1 = mshift1\n self.mshift2 = mshift2\n self.randn_weighting = randn_weighting\n\n def forward(self, img):\n # compute patch-ssd\n device = img.device\n\n edge_selection = F.conv3d(\n self.rpad(img), self.mshift1.to(device), dilation=self.delta\n ) - F.conv3d(self.rpad(img), self.mshift2.to(device), dilation=self.delta)\n\n edge_selection = edge_selection + self.randn_weighting * torch.randn_like(\n edge_selection\n )\n ssd = smooth(edge_selection**2, self.sigma)\n\n # MIND equation\n mind = ssd - torch.min(ssd, 1, keepdim=True)[0]\n mind_var = torch.mean(mind, 1, keepdim=True)\n mind_var = torch.clamp(\n mind_var, mind_var.mean() * 0.001, mind_var.mean() * 1000\n )\n mind /= mind_var\n mind = torch.exp(-mind)\n\n return mind"
},
{
"identifier": "gin_aug",
"path": "dg_tta/gin.py",
"snippet": "def gin_aug(input):\n cfg = dict(\n IN_CHANNELS=1,\n N_LAYER=4,\n INTERM_CHANNELS=2,\n )\n gin_group_conv = GINGroupConv(cfg)\n input = gin_group_conv(input)\n return input"
}
] | import torch
import torch.nn.functional as F
from dg_tta.mind import MIND3D
from dg_tta.gin import gin_aug | 2,382 | # https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
# Vincent Arsigny, Olivier Commowick, Xavier Pennec, Nicholas Ayache: A Log-Euclidean Framework for Statistics on Diffeomorphisms
B, C, D, H, W = disp_field.size()
dimension_correction = torch.tensor([D, H, W], device=disp_field.device).view(
1, 3, 1, 1, 1
)
dt = 1.0 / time_steps
with torch.no_grad():
identity = (
F.affine_grid(
torch.eye(3, 4).unsqueeze(0), (1, 1, D, H, W), align_corners=True
)
.permute(0, 4, 1, 2, 3)
.to(disp_field)
)
if ensure_inverse_consistency:
out_disp_field = (
disp_field / dimension_correction / (2**time_steps) * dt
).clone()
out_inverse_disp_field = (
inverse_disp_field / dimension_correction / (2**time_steps) * dt
).clone()
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = out_disp_field.clone()
inverse_ds = out_inverse_disp_field.clone()
out_disp_field = +0.5 * ds - 0.5 * F.grid_sample(
inverse_ds,
(identity + ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_inverse_disp_field = +0.5 * inverse_ds - 0.5 * F.grid_sample(
ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_disp_field = out_disp_field * 2**time_steps * dimension_correction
out_inverse_disp_field = (
out_inverse_disp_field * 2**time_steps * dimension_correction
)
else:
# https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
ds_dt = (
disp_field / dimension_correction / (2**time_steps)
) # velocity = ds/dt
inverse_ds_dt = (
inverse_disp_field / dimension_correction / (2**time_steps)
)
ds = ds_dt * dt
inverse_ds = inverse_ds_dt * dt
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = ds + F.grid_sample(
ds,
(identity + ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
inverse_ds = inverse_ds + F.grid_sample(
inverse_ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
out_disp_field = ds * dimension_correction
out_inverse_disp_field = inverse_ds * dimension_correction
return out_disp_field, out_inverse_disp_field
def get_disp_field(
batch_num, size_3d, factor=0.1, interpolation_factor=5, device="cpu"
):
field = get_rf_field(
batch_num,
size_3d,
alternating_fields=False,
num_fields=3,
interpolation_factor=interpolation_factor,
device=device,
)
STEPS = 5
disp_field, inverse_disp_field = calc_consistent_diffeomorphic_field(
field * factor, torch.zeros_like(field), STEPS, ensure_inverse_consistency=True
)
return disp_field.permute(0, 2, 3, 4, 1), inverse_disp_field.permute(0, 2, 3, 4, 1)
def get_rand_affine(batch_size, strength=0.05, flip=False):
affine = torch.cat(
(
torch.randn(batch_size, 3, 4) * strength + torch.eye(3, 4).unsqueeze(0),
torch.tensor([0, 0, 0, 1]).view(1, 1, 4).repeat(batch_size, 1, 1),
),
1,
)
if flip:
flip_affine = torch.diag(
torch.cat([(2 * (torch.rand(3) > 0.5).float() - 1), torch.tensor([1.0])])
)
affine = affine @ flip_affine
return affine[:, :3], affine.inverse()[:, :3]
def gin_mind_aug(input):
|
def get_rf_field(
num_batch, size_3d, interpolation_factor=4, num_fields=4, device="cpu"
):
rf_field = F.interpolate(
F.avg_pool3d(
F.avg_pool3d(
F.avg_pool3d(
torch.randn(
num_batch,
num_fields,
size_3d[0] // interpolation_factor,
size_3d[1] // interpolation_factor,
size_3d[2] // interpolation_factor,
device=device,
),
interpolation_factor,
stride=1,
padding=interpolation_factor // 2,
),
interpolation_factor,
stride=1,
padding=interpolation_factor // 2,
),
interpolation_factor,
stride=1,
padding=interpolation_factor // 2,
),
size=size_3d,
mode="trilinear",
)
rf_field -= rf_field.mean((-3, -2, -1), keepdim=True)
rf_field /= 1e-3 + rf_field.view(num_batch * num_fields, -1).std(1).view(
num_batch, num_fields, 1, 1, 1
)
return rf_field
def calc_consistent_diffeomorphic_field(
disp_field,
inverse_disp_field,
time_steps=1,
ensure_inverse_consistency=True,
iter_steps_override=None,
):
# https://github.com/multimodallearning/convexAdam/blob/76a595914eb21ea17795e6cd19503ab447f0ea6b/l2r_2021_convexAdam_task1_docker.py#L166
# https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
# Vincent Arsigny, Olivier Commowick, Xavier Pennec, Nicholas Ayache: A Log-Euclidean Framework for Statistics on Diffeomorphisms
B, C, D, H, W = disp_field.size()
dimension_correction = torch.tensor([D, H, W], device=disp_field.device).view(
1, 3, 1, 1, 1
)
dt = 1.0 / time_steps
with torch.no_grad():
identity = (
F.affine_grid(
torch.eye(3, 4).unsqueeze(0), (1, 1, D, H, W), align_corners=True
)
.permute(0, 4, 1, 2, 3)
.to(disp_field)
)
if ensure_inverse_consistency:
out_disp_field = (
disp_field / dimension_correction / (2**time_steps) * dt
).clone()
out_inverse_disp_field = (
inverse_disp_field / dimension_correction / (2**time_steps) * dt
).clone()
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = out_disp_field.clone()
inverse_ds = out_inverse_disp_field.clone()
out_disp_field = +0.5 * ds - 0.5 * F.grid_sample(
inverse_ds,
(identity + ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_inverse_disp_field = +0.5 * inverse_ds - 0.5 * F.grid_sample(
ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_disp_field = out_disp_field * 2**time_steps * dimension_correction
out_inverse_disp_field = (
out_inverse_disp_field * 2**time_steps * dimension_correction
)
else:
# https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
ds_dt = (
disp_field / dimension_correction / (2**time_steps)
) # velocity = ds/dt
inverse_ds_dt = (
inverse_disp_field / dimension_correction / (2**time_steps)
)
ds = ds_dt * dt
inverse_ds = inverse_ds_dt * dt
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = ds + F.grid_sample(
ds,
(identity + ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
inverse_ds = inverse_ds + F.grid_sample(
inverse_ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
out_disp_field = ds * dimension_correction
out_inverse_disp_field = inverse_ds * dimension_correction
return out_disp_field, out_inverse_disp_field
def get_disp_field(
batch_num, size_3d, factor=0.1, interpolation_factor=5, device="cpu"
):
field = get_rf_field(
batch_num,
size_3d,
alternating_fields=False,
num_fields=3,
interpolation_factor=interpolation_factor,
device=device,
)
STEPS = 5
disp_field, inverse_disp_field = calc_consistent_diffeomorphic_field(
field * factor, torch.zeros_like(field), STEPS, ensure_inverse_consistency=True
)
return disp_field.permute(0, 2, 3, 4, 1), inverse_disp_field.permute(0, 2, 3, 4, 1)
def get_rand_affine(batch_size, strength=0.05, flip=False):
affine = torch.cat(
(
torch.randn(batch_size, 3, 4) * strength + torch.eye(3, 4).unsqueeze(0),
torch.tensor([0, 0, 0, 1]).view(1, 1, 4).repeat(batch_size, 1, 1),
),
1,
)
if flip:
flip_affine = torch.diag(
torch.cat([(2 * (torch.rand(3) > 0.5).float() - 1), torch.tensor([1.0])])
)
affine = affine @ flip_affine
return affine[:, :3], affine.inverse()[:, :3]
def gin_mind_aug(input): | return MIND3D()(gin_aug(input)) | 1 | 2023-12-08 08:43:11+00:00 | 4k |
chengkaiAcademyCity/EnvAwareAfford | code/models/model_env_aware_LineDisF.py | [
{
"identifier": "PointNetEncoder",
"path": "code/models/pointnet_utils.py",
"snippet": "class PointNetEncoder(nn.Module):\n def __init__(self, global_feat=True, feature_transform=False, channel=3):\n super(PointNetEncoder, self).__init__()\n self.stn = STN3d(channel)\n self.conv1 = torch.nn.Conv1d(channel, 64, 1)\n self.conv2 = torch.nn.Conv1d(64, 128, 1)\n self.conv3 = torch.nn.Conv1d(128, 1024, 1)\n self.bn1 = nn.BatchNorm1d(64)\n self.bn2 = nn.BatchNorm1d(128)\n self.bn3 = nn.BatchNorm1d(1024)\n self.global_feat = global_feat\n self.feature_transform = feature_transform\n if self.feature_transform:\n self.fstn = STNkd(k=64)\n\n def forward(self, x):\n B, D, N = x.size() # B: batch size, D: channel, N: number of points\n trans = self.stn(x)\n x = x.transpose(2, 1)\n if D > 3:\n feature = x[:, :, 3:]\n x = x[:, :, :3]\n x = torch.bmm(x, trans)\n if D > 3:\n x = torch.cat([x, feature], dim=2)\n x = x.transpose(2, 1)\n x = F.relu(self.bn1(self.conv1(x)))\n\n if self.feature_transform:\n trans_feat = self.fstn(x)\n x = x.transpose(2, 1)\n x = torch.bmm(x, trans_feat)\n x = x.transpose(2, 1)\n else:\n trans_feat = None\n\n pointfeat = x\n x = F.relu(self.bn2(self.conv2(x)))\n x = self.bn3(self.conv3(x))\n x = torch.max(x, 2, keepdim=True)[0]\n x = x.view(-1, 1024)\n if self.global_feat:\n return x, trans, trans_feat\n else:\n x = x.view(-1, 1024, 1).repeat(1, 1, N)\n return torch.cat([x, pointfeat], 1), trans, trans_feat"
},
{
"identifier": "feature_transform_reguliarzer",
"path": "code/models/pointnet_utils.py",
"snippet": "def feature_transform_reguliarzer(trans):\n d = trans.size()[1]\n I = torch.eye(d)[None, :, :]\n if trans.is_cuda:\n I = I.cuda()\n loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))\n return loss"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader
from pointnet2_ops import pointnet2_utils
from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule
from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
from .pointnet_utils import PointNetEncoder, feature_transform_reguliarzer | 2,463 | be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0])
class PointNet2SemSegSSGShape(PointNet2ClassificationSSG):
def _build_model(self):
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=512,
radius=0.2,
nsample=64,
mlp=[3, 64, 64, 128],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=128,
radius=0.4,
nsample=64,
mlp=[128, 128, 128, 256],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
mlp=[256, 256, 256, 256],
use_xyz=True,
)
)
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 256, 256, 256]))
self.fc_layer = nn.Sequential(
nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
self.fc_layer2 = nn.Sequential(
nn.Linear(256, self.hparams['feat_dim']),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
def forward(self, pointcloud):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
bottleneck_feats = l_features[-1].squeeze(-1)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0]), self.fc_layer2(bottleneck_feats)
# class PointNet(nn.Module):
# def __init__(self, feat_dim):
# super(PointNet, self).__init__()
#
# self.conv1 = nn.Conv1d(feat_dim*2, feat_dim, 1)
# self.conv2 = nn.Conv1d(feat_dim, feat_dim, 1)
# self.conv3 = nn.Conv1d(feat_dim, feat_dim, 1)
#
# self.bn1 = nn.BatchNorm1d(feat_dim)
# self.bn2 = nn.BatchNorm1d(feat_dim)
# self.bn3 = nn.BatchNorm1d(feat_dim)
#
# # B x 2F x N
# # output: B x F
# def forward(self, x):
# x = torch.relu(self.bn1(self.conv1(x)))
# x = torch.relu(self.bn2(self.conv2(x)))
# x = torch.relu(self.bn3(self.conv3(x)))
# x = x.max(dim=-1)[0]
# return x
class PointNet(nn.Module):
def __init__(self, feat_dim, normal_channel=False):
super(PointNet, self).__init__()
if normal_channel:
channel = 6
else:
channel = 3
| """
This file borrows PointNet2 implementation: https://github.com/erikwijmans/Pointnet2_PyTorch
"""
class MyFPModule(nn.Module):
def __init__(self):
super(MyFPModule, self).__init__()
# B x N x 3, B x M X 3, B x F x M
# output: B x F x N
def forward(self, unknown, known, known_feats):
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
return new_features.squeeze(-1)
class PointNet2SemSegSSG(PointNet2ClassificationSSG):
def _build_model(self):
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=1024,
radius=0.1,
nsample=32,
mlp=[3, 32, 32, 64],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=256,
radius=0.2,
nsample=32,
mlp=[64, 64, 64, 128],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=64,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=16,
radius=0.8,
nsample=32,
mlp=[256, 256, 256, 512],
use_xyz=True,
)
)
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))
self.fc_layer = nn.Sequential(
nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
def forward(self, pointcloud):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0])
class PointNet2SemSegSSGShape(PointNet2ClassificationSSG):
def _build_model(self):
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=512,
radius=0.2,
nsample=64,
mlp=[3, 64, 64, 128],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=128,
radius=0.4,
nsample=64,
mlp=[128, 128, 128, 256],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
mlp=[256, 256, 256, 256],
use_xyz=True,
)
)
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 256, 256, 256]))
self.fc_layer = nn.Sequential(
nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
self.fc_layer2 = nn.Sequential(
nn.Linear(256, self.hparams['feat_dim']),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
def forward(self, pointcloud):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
bottleneck_feats = l_features[-1].squeeze(-1)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0]), self.fc_layer2(bottleneck_feats)
# class PointNet(nn.Module):
# def __init__(self, feat_dim):
# super(PointNet, self).__init__()
#
# self.conv1 = nn.Conv1d(feat_dim*2, feat_dim, 1)
# self.conv2 = nn.Conv1d(feat_dim, feat_dim, 1)
# self.conv3 = nn.Conv1d(feat_dim, feat_dim, 1)
#
# self.bn1 = nn.BatchNorm1d(feat_dim)
# self.bn2 = nn.BatchNorm1d(feat_dim)
# self.bn3 = nn.BatchNorm1d(feat_dim)
#
# # B x 2F x N
# # output: B x F
# def forward(self, x):
# x = torch.relu(self.bn1(self.conv1(x)))
# x = torch.relu(self.bn2(self.conv2(x)))
# x = torch.relu(self.bn3(self.conv3(x)))
# x = x.max(dim=-1)[0]
# return x
class PointNet(nn.Module):
def __init__(self, feat_dim, normal_channel=False):
super(PointNet, self).__init__()
if normal_channel:
channel = 6
else:
channel = 3 | self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel) | 0 | 2023-12-08 09:29:36+00:00 | 4k |
tommy-xq/SA2VP | vpt_main/src/engine/trainer.py | [
{
"identifier": "Evaluator",
"path": "vpt_main/src/engine/evaluator.py",
"snippet": "class Evaluator():\n \"\"\"\n An evaluator with below logics:\n\n 1. find which eval module to use.\n 2. store the eval results, pretty print it in log file as well.\n \"\"\"\n\n def __init__(\n self,\n ) -> None:\n self.results = defaultdict(dict)\n self.iteration = -1\n self.threshold_end = 0.5\n\n def update_iteration(self, iteration: int) -> None:\n \"\"\"update iteration info\"\"\"\n self.iteration = iteration\n\n def update_result(self, metric: str, value: Union[float, dict]) -> None:\n if self.iteration > -1:\n key_name = \"epoch_\" + str(self.iteration)\n else:\n key_name = \"final\"\n if isinstance(value, float):\n self.results[key_name].update({metric: value})\n else:\n if metric in self.results[key_name]:\n self.results[key_name][metric].update(value)\n else:\n self.results[key_name].update({metric: value})\n\n def classify(self, probs, targets, test_data, multilabel=False):\n \"\"\"\n Evaluate classification result.\n Args:\n probs: np.ndarray for num_data x num_class, predicted probabilities\n targets: np.ndarray for multilabel, list of integers for single label\n test_labels: map test image ids to a list of class labels\n \"\"\"\n if not targets:\n raise ValueError(\n \"When evaluating classification, need at least give targets\")\n\n if multilabel:\n self._eval_multilabel(probs, targets, test_data)\n else:\n self._eval_singlelabel(probs, targets, test_data)\n\n def _eval_singlelabel(\n self,\n scores: np.ndarray,\n targets: List[int],\n eval_type: str\n ) -> None:\n \"\"\"\n if number of labels > 2:\n top1 and topk (5 by default) accuracy\n if number of labels == 2:\n top1 and rocauc\n \"\"\"\n acc_dict = singlelabel.compute_acc_auc(scores, targets)\n\n log_results = {\n k: np.around(v * 100, decimals=2) for k, v in acc_dict.items()\n }\n save_results = acc_dict\n\n self.log_and_update(log_results, save_results, eval_type)\n\n def _eval_multilabel(\n self,\n scores: np.ndarray,\n targets: np.ndarray,\n eval_type: str\n ) -> None:\n num_labels = scores.shape[-1]\n targets = multilabel.multihot(targets, num_labels)\n\n log_results = {}\n ap, ar, mAP, mAR = multilabel.compute_map(scores, targets)\n f1_dict = multilabel.get_best_f1_scores(\n targets, scores, self.threshold_end)\n\n log_results[\"mAP\"] = np.around(mAP * 100, decimals=2)\n log_results[\"mAR\"] = np.around(mAR * 100, decimals=2)\n log_results.update({\n k: np.around(v * 100, decimals=2) for k, v in f1_dict.items()})\n save_results = {\n \"ap\": ap, \"ar\": ar, \"mAP\": mAP, \"mAR\": mAR, \"f1\": f1_dict\n }\n self.log_and_update(log_results, save_results, eval_type)\n\n def log_and_update(self, log_results, save_results, eval_type):\n log_str = \"\"\n for k, result in log_results.items():\n if not isinstance(result, np.ndarray):\n log_str += f\"{k}: {result:.2f}\\t\"\n else:\n log_str += f\"{k}: {list(result)}\\t\"\n logger.info(f\"Classification results with {eval_type}: {log_str}\")\n # save everything\n self.update_result(\"classification\", {eval_type: save_results})"
},
{
"identifier": "make_scheduler",
"path": "vpt_main/src/solver/lr_scheduler.py",
"snippet": "def make_scheduler(\n optimizer: optim.Optimizer, train_params: CfgNode\n) -> LambdaLR:\n warmup = train_params.WARMUP_EPOCH\n total_iters = train_params.TOTAL_EPOCH\n\n if train_params.SCHEDULER == \"cosine\":\n scheduler = WarmupCosineSchedule(\n optimizer,\n warmup_steps=warmup,\n t_total=total_iters\n )\n elif train_params.SCHEDULER == \"cosine_hardrestart\":\n scheduler = WarmupCosineWithHardRestartsSchedule(\n optimizer,\n warmup_steps=warmup,\n t_total=total_iters\n )\n\n elif train_params.SCHEDULER == \"plateau\":\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n \"max\",\n patience=5,\n verbose=True,\n factor=train_params.LR_DECAY_FACTOR,\n )\n else:\n scheduler = None\n return scheduler"
},
{
"identifier": "make_optimizer",
"path": "vpt_main/src/solver/optimizer.py",
"snippet": "def make_optimizer(\n models: List[Any], train_params: CfgNode\n) -> Optimizer:\n params = []\n for model in models:\n # only include learnable params\n if train_params.DBG_TRAINABLE:\n logger.info(\"Trainable params:\")\n\n for key, value in model.named_parameters():\n \n if value.requires_grad:\n\n if train_params.DBG_TRAINABLE:\n logger.info(\"\\t{}, {}, {}\".format(key, value.numel(), value.shape))\n params.append((key, value))\n\n if train_params.WEIGHT_DECAY > 0:\n if train_params.OPTIMIZER == 'adamw':\n\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in params\n if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in params\n if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=train_params.BASE_LR,\n )\n else:\n _params = []\n for p in params:\n key, value = p\n # print(key)\n # if not value.requires_grad:\n # continue\n lr = train_params.BASE_LR\n weight_decay = train_params.WEIGHT_DECAY\n if \"last_layer.bias\" in key:\n # no regularization (weight decay) for last layer's bias\n weight_decay = 0.0\n\n if train_params.BIAS_MULTIPLIER == 1.:\n _params += [{\n \"params\": [value],\n \"lr\": lr,\n \"weight_decay\": weight_decay\n }]\n else:\n if \"bias\" in key and \"last_layer.bias\" not in key:\n # use updated lr for this param\n lr_value = lr * train_params.BIAS_MULTIPLIER\n else:\n lr_value = lr\n\n if train_params.DBG_TRAINABLE:\n logger.info(\"\\t{}, {:.4f}\".format(key, lr_value))\n\n _params += [{\n \"params\": [value],\n \"lr\": lr_value,\n \"weight_decay\": weight_decay\n }]\n\n if train_params.OPTIMIZER == 'adam':\n optimizer = optim.Adam(\n _params,\n lr=train_params.BASE_LR,\n weight_decay=train_params.WEIGHT_DECAY,\n )\n else:\n optimizer = optim.SGD(\n _params,\n train_params.BASE_LR,\n momentum=train_params.MOMENTUM,\n weight_decay=train_params.WEIGHT_DECAY\n )\n return optimizer\n else:\n if train_params.OPTIMIZER == 'adam':\n optimizer = optim.Adam(\n model.parameters(),\n lr=train_params.BASE_LR\n )\n else:\n _params = []\n for p in params:\n key, value = p\n\n lr = train_params.BASE_LR\n\n if train_params.BIAS_MULTIPLIER == 1.:\n _params += [{\n \"params\": [value],\n \"lr\": lr,\n }]\n else:\n if \"bias\" in key and \"last_layer.bias\" not in key:\n # use updated lr for this param\n lr_value = lr * train_params.BIAS_MULTIPLIER\n else:\n lr_value = lr\n\n if train_params.DBG_TRAINABLE:\n logger.info(\"\\t{}, {:.4f}\".format(key, lr_value))\n\n _params += [{\n \"params\": [value],\n \"lr\": lr_value,\n }]\n optimizer = optim.SGD(\n _params,\n train_params.BASE_LR,\n momentum=train_params.MOMENTUM,\n )\n return optimizer"
},
{
"identifier": "build_loss",
"path": "vpt_main/src/solver/losses.py",
"snippet": "def build_loss(cfg):\n loss_name = cfg.SOLVER.LOSS\n assert loss_name in LOSS, \\\n f'loss name {loss_name} is not supported'\n loss_fn = LOSS[loss_name]\n if not loss_fn:\n return None\n else:\n return loss_fn(cfg)"
},
{
"identifier": "logging",
"path": "vpt_main/src/utils/logging.py",
"snippet": "_FORMAT = \"[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s\"\ndef _suppress_print():\n def print_pass(*objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\ndef _cached_log_stream(filename):\ndef setup_logging(\n num_gpu, num_shards, output=\"\", name=\"visual_prompt\", color=True):\ndef setup_single_logging(name, output=\"\"):\ndef get_logger(name):\ndef log_json_stats(stats, sort_keys=True):\n def __init__(self, *args, **kwargs):\n def formatMessage(self, record: logging.LogRecord) -> str:\nclass _ColorfulFormatter(logging.Formatter):"
},
{
"identifier": "AverageMeter",
"path": "vpt_main/src/utils/train_utils.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)"
},
{
"identifier": "gpu_mem_usage",
"path": "vpt_main/src/utils/train_utils.py",
"snippet": "def gpu_mem_usage():\n \"\"\"Computes the GPU memory usage for the current device (GB).\"\"\"\n if not torch.cuda.is_available():\n return 0\n # Number of bytes in a megabyte\n _B_IN_GB = 1024 * 1024 * 1024\n\n mem_usage_bytes = torch.cuda.max_memory_allocated()\n return mem_usage_bytes / _B_IN_GB"
}
] | import datetime
import time
import torch
import torch.nn as nn
import os
from fvcore.common.config import CfgNode
from fvcore.common.checkpoint import Checkpointer
from ..engine.evaluator import Evaluator
from ..solver.lr_scheduler import make_scheduler
from ..solver.optimizer import make_optimizer
from ..solver.losses import build_loss
from ..utils import logging
from ..utils.train_utils import AverageMeter, gpu_mem_usage | 2,958 | #!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("visual_prompt")
class Trainer():
"""
    A trainer with the following logic:
1. Build optimizer, scheduler
2. Load checkpoints if provided
3. Train and eval at each epoch
"""
def __init__(
self,
cfg: CfgNode,
model: nn.Module,
evaluator: Evaluator,
device: torch.device,
) -> None:
self.cfg = cfg
self.model = model
self.device = device
# solver related
logger.info("\tSetting up the optimizer...")
| #!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("visual_prompt")
class Trainer():
"""
    A trainer with the following logic:
1. Build optimizer, scheduler
2. Load checkpoints if provided
3. Train and eval at each epoch
"""
def __init__(
self,
cfg: CfgNode,
model: nn.Module,
evaluator: Evaluator,
device: torch.device,
) -> None:
self.cfg = cfg
self.model = model
self.device = device
# solver related
logger.info("\tSetting up the optimizer...") | self.optimizer = make_optimizer([self.model], cfg.SOLVER) | 2 | 2023-12-12 13:19:17+00:00 | 4k |
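The context snippets and the next_line field above indicate how the solver objects are assembled inside Trainer.__init__. A minimal sketch of that continuation, assuming cfg.SOLVER carries the fields used by the snippets (OPTIMIZER, BASE_LR, SCHEDULER, TOTAL_EPOCH, WARMUP_EPOCH); only self.optimizer is confirmed by the record, the other attribute names are assumptions:

        # build optimizer and LR scheduler from the solver config (sketch, not the verified source)
        self.optimizer = make_optimizer([self.model], cfg.SOLVER)
        self.scheduler = make_scheduler(self.optimizer, cfg.SOLVER)
        # build the training loss from the full config (build_loss reads cfg.SOLVER.LOSS)
        self.cls_criterion = build_loss(self.cfg)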
ChatClue/ChatClue | background/memory/tasks.py | [
{
"identifier": "ConversationMemoryManager",
"path": "database/conversations.py",
"snippet": "class ConversationMemoryManager:\n \"\"\"\n Manages database operations for the Conversation table,\n including insertions, updates, deletions, and queries.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes the ConversationManager with a database session.\n \"\"\"\n self.engine = get_engine()\n\n def add_conversation(self, speaker_type, response, response_embedding, response_tokens):\n \"\"\"\n Adds a new conversation to the database, automatically creating embeddings and calculating token counts.\n\n Parameters:\n user_prompt (str): The user's prompt text.\n assistant_response (str): The assistant's response text.\n \"\"\"\n\n new_conversation = Conversation(\n speaker_type=speaker_type,\n response=response,\n response_tokens=response_tokens,\n response_embedding=response_embedding,\n )\n \n Session = sessionmaker(bind=self.engine)\n\n with Session() as session:\n session.add(new_conversation)\n session.commit()\n\n def get_conversation(self, conversation_id):\n \"\"\"\n Retrieves a conversation from the database by its ID.\n \"\"\"\n Session = sessionmaker(bind=self.engine)\n with Session() as session:\n return session.query(Conversation).filter_by(id=conversation_id).first()\n\n def update_conversation(self, conversation_id, **updates):\n \"\"\"\n Updates a conversation in the database based on the provided conversation ID and update fields.\n\n Parameters:\n conversation_id (int): The ID of the conversation to be updated.\n **updates: Arbitrary keyword arguments representing the fields to update and their new values.\n\n Example:\n To update the user prompt and tokens of a conversation with ID 123:\n \n update_conversation(123, userPrompt=\"New prompt text\", userPromptTokens=5)\n\n Note:\n The fields in **updates should match the column names of the Conversation model.\n \"\"\"\n Session = sessionmaker(bind=self.engine)\n with Session() as session:\n session.query(Conversation).filter_by(id=conversation_id).update(updates)\n session.commit()\n\n def delete_conversation(self, conversation_id):\n \"\"\"\n Deletes a conversation from the database.\n \"\"\"\n Session = sessionmaker(bind=self.engine)\n with Session() as session:\n conversation = self.session.query(Conversation).filter_by(id=conversation_id).first()\n if conversation:\n session.delete(conversation)\n session.commit()\n\n def list_conversations(self, after_date=None, before_date=None):\n \"\"\"\n Lists all conversations in the database within a specified date range.\n\n Parameters:\n after_date (datetime): Optional. Retrieve conversations after this date.\n before_date (datetime): Optional. 
Retrieve conversations before this date.\n\n Returns:\n List of Conversation objects that match the criteria.\n \"\"\"\n Session = sessionmaker(bind=self.engine)\n with Session() as session:\n query = session.query(Conversation)\n \n if after_date:\n query = query.filter(Conversation.createdAt >= after_date)\n \n if before_date:\n query = query.filter(Conversation.createdAt <= before_date)\n\n return query.all()\n\n def list_recent_conversations(self, context_limit):\n \"\"\"\n Lists recent conversations from the database such that their total token count is close to the context limit.\n\n Returns:\n List of Conversation objects that match the criteria.\n \"\"\"\n Session = sessionmaker(bind=self.engine)\n with Session() as session:\n subquery = session.query(\n Conversation,\n func.sum(Conversation.response_tokens).over(order_by=Conversation.created_at.desc()).label('running_total')\n ).subquery()\n\n query = session.query(subquery).filter(subquery.c.running_total <= context_limit).order_by(subquery.c.created_at.asc())\n\n return query.all()"
},
{
"identifier": "SystemStateManager",
"path": "database/system_state.py",
"snippet": "class SystemStateManager:\n \"\"\"\n Manages database operations for the SystemState table,\n including insertions and updates.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes the SystemStateManager with a database session.\n \"\"\"\n self.engine = get_engine()\n\n def get_or_create_state(self):\n \"\"\"\n Retrieves the current system state from the database, or creates it if it doesn't exist.\n \"\"\"\n Session = sessionmaker(bind=self.engine)\n with Session() as session:\n state = session.query(SystemState).first()\n if not state:\n state = SystemState()\n session.add(state)\n session.commit()\n return state\n\n def update_system_state(self, **updates):\n \"\"\"\n Updates the system state in the database.\n\n Parameters:\n **updates: Arbitrary keyword arguments representing the fields to update and their new values.\n\n Example:\n To update the last wake time of the system state:\n \n update_system_state(last_wake_time=datetime.now())\n \"\"\"\n Session = sessionmaker(bind=self.engine)\n with Session() as session:\n state = session.query(SystemState).first()\n if not state:\n state = SystemState()\n session.add(state)\n\n for key, value in updates.items():\n setattr(state, key, value)\n\n session.commit()"
},
{
"identifier": "OpenAIClient",
"path": "integrations/openai/openai.py",
"snippet": "class OpenAIClient:\n \"\"\"\n A client class for interacting with OpenAI's GPT model.\n\n This class handles the creation and streaming of responses from the OpenAI API based on recognized text input.\n\n Attributes:\n client (OpenAI): The OpenAI client for API interaction.\n response_queue (queue.Queue): Queue to hold responses from OpenAI.\n stop_signal (threading.Event): Signal to control the streaming of responses.\n model (str): The model name for OpenAI API requests.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initializes the OpenAI client with settings from the configuration.\n\n If an API key is provided in OPENAI_SETTINGS, it uses that key.\n Otherwise, it defaults to the API key set in the environment variable.\n \"\"\"\n api_key = OPENAI_SETTINGS.get('api_key')\n if api_key:\n self.client = OpenAI(api_key=api_key)\n else:\n self.client = OpenAI()\n self.response_queue = queue.Queue()\n self.stop_signal = threading.Event()\n self.model = OPENAI_SETTINGS.get('model', \"gpt-3.5-turbo-1106\")\n self.image_model = OPENAI_SETTINGS.get('image_model', \"gpt-4-1106-vision-preview\")\n self.embedding_model = OPENAI_SETTINGS.get('embedding_model', \"text-embedding-ada-002\")\n self.temperature = OPENAI_SETTINGS.get('temperature', 0.5)\n self.streaming_complete = False\n\n def create_completion(self, recent_messages, streaming=True, response_format=None, tools=None, is_tool_call=False):\n \"\"\"\n Creates a completion request to the OpenAI API based on recent messages.\n\n This method selects a model based on the presence of an image URL in the conversation \n and sends a completion request to the OpenAI API using the chosen model. It can also\n handle tool choices for the conversation if provided.\n\n Args:\n recent_messages (list): A list of message dictionaries from the recent conversation.\n streaming (bool): Indicates if streaming is enabled for the response.\n response_format (str, optional): The format in which the response is expected.\n tools (list, optional): A list of tools that can be used in the conversation.\n is_tool_call (bool): Flag to indicate if this is a direct tool call.\n\n Returns:\n The response object from the OpenAI API or None if an error occurs.\n \"\"\"\n try:\n model = self.model\n if OpenAIConversationBuilder.messages_array_contains_image(recent_messages):\n # Use the image model if any message contains an image URL\n model = self.image_model\n\n tool_choice = None\n if tools is not None and not is_tool_call:\n # Modify the last message to prompt for a tool choice if tools are available\n recent_messages[-1][\"content\"] = \"Please pick a tool from the tools array and return a tools response to complete this request: \" + recent_messages[-1][\"content\"]\n tool_choice = \"auto\"\n\n # Create a completion request to the OpenAI API\n response = self.client.chat.completions.create(\n model=model,\n messages=recent_messages,\n tools=tools,\n temperature=self.temperature,\n stream=streaming,\n response_format=response_format,\n tool_choice=tool_choice\n )\n return response\n except OpenAIError as e:\n # Handle API-specific errors\n logging.error(f\"OpenAI API error: {e}\")\n return None\n except Exception as e:\n # Handle general errors\n logging.error(f\"Error while creating completion: {e}\")\n return None\n\n\n def stream_response(self, conversation):\n \"\"\"\n Streams the response from the OpenAI API to a queue.\n\n This method fetches the response for the recognized text and puts each response chunk into a queue.\n The method frequently checks for 
a stop signal to terminate streaming immediately.\n\n Args:\n recognized_text (str): The text recognized from the audio input.\n \"\"\"\n self.streaming_complete = False\n try:\n response = self.create_completion(conversation)\n if response:\n for chunk in response:\n if self.stop_signal.is_set():\n logging.info(\"Streaming stopped due to stop signal.\")\n break\n self.response_queue.put(chunk)\n else:\n logging.info(\"No response from OpenAI API or an error occurred.\")\n except Exception as e:\n logging.error(f\"Error during streaming: {e}\")\n finally:\n self.streaming_complete = True\n\n def create_embeddings(self, text):\n \"\"\"\n Generates embeddings for the given text using the OpenAI API.\n\n Args:\n text (str): The text to generate embeddings for.\n\n Returns:\n The embedding vector as a list, or None if an error occurs.\n \"\"\"\n try:\n response = self.client.embeddings.create(\n model=self.embedding_model,\n input=text\n )\n # return response.\n return response.data[0].embedding\n except OpenAIError as e:\n logging.error(f\"OpenAI API error: {e}\")\n return None\n except Exception as e:\n logging.error(f\"Error while creating embeddings: {e}\")\n return None\n \n def calculate_token_count(self, text):\n \"\"\"\n Calculates the number of tokens for the given text using OpenAI's GPT model.\n\n Args:\n text (str): The text to calculate the token count for.\n\n Returns:\n int: The number of tokens in the text.\n \"\"\"\n enc = tiktoken.encoding_for_model(self.model)\n return len(enc.encode(text))\n \n def stop_processing_request(self):\n \"\"\"\n Stops processing the current request immediately and clears the response queue.\n \"\"\"\n self.stop_signal.set() # Signal to stop streaming\n self.full_stop()\n time.sleep(0.5)\n self.full_stop()\n self.stop_signal.clear() # Reset the stop signal for future requests\n\n \n def clear_queue(self):\n \"\"\"\n Clears all items from the response queue.\n \"\"\"\n while not self.response_queue.empty():\n try:\n self.response_queue.get_nowait() # Remove all items from the queue\n self.response_queue.task_done()\n except queue.Empty:\n break\n \n def conversation_contains_image_url(self, conversation):\n \"\"\"\n Checks if any of the messages in the provided array contain an 'image_url' content type.\n\n Args:\n messages (list): A list of message dictionaries. Each message is expected to have \n 'role' and 'content' keys. The 'content' can be a string or a dictionary.\n\n Returns:\n bool: True if any message contains an 'image_url' content type, False otherwise.\n \"\"\"\n for message in conversation:\n # Check if the content is a dictionary and has 'type' key with value 'image_url'\n if isinstance(message.get('content'), dict) and message['content'].get('type') == 'image_url':\n return True\n return False\n \n def full_stop(self):\n self.clear_queue() # Clear the queue immediately\n self.streaming_complete = False # Reset the streaming state\n \n def shutdown(self):\n self.stop_processing_request()"
}
] | from celery import shared_task
from database.conversations import ConversationMemoryManager
from database.system_state import SystemStateManager
from integrations.openai.openai import OpenAIClient
from datetime import datetime | 3,125 |
@shared_task
def store_conversation_task(speaker_type, response):
"""
A Celery task for storing conversation parts in the database.
This asynchronous task takes a speaker type and a response, and stores them in the database
using the ConversationMemoryManager. It is designed to offload the database writing process
from the main execution thread, improving performance and responsiveness.
Args:
speaker_type (str): The type of speaker (e.g., 'user' or 'assistant'), indicating who is speaking.
response (str): The text of the response or conversation part to be stored.
"""
|
@shared_task
def store_conversation_task(speaker_type, response):
"""
A Celery task for storing conversation parts in the database.
This asynchronous task takes a speaker type and a response, and stores them in the database
using the ConversationMemoryManager. It is designed to offload the database writing process
from the main execution thread, improving performance and responsiveness.
Args:
speaker_type (str): The type of speaker (e.g., 'user' or 'assistant'), indicating who is speaking.
response (str): The text of the response or conversation part to be stored.
""" | openai_client = OpenAIClient() | 2 | 2023-12-06 09:10:06+00:00 | 4k |
GXNU-ZhongLab/ODTrack | lib/train/actors/odtrack.py | [
{
"identifier": "BaseActor",
"path": "lib/train/actors/base_actor.py",
"snippet": "class BaseActor:\n \"\"\" Base class for actor. The actor class handles the passing of the data through the network\n and calculation the loss\"\"\"\n def __init__(self, net, objective):\n \"\"\"\n args:\n net - The network to train\n objective - The loss function\n \"\"\"\n self.net = net\n self.objective = objective\n\n def __call__(self, data: TensorDict):\n \"\"\" Called in each training iteration. Should pass in input data through the network, calculate the loss, and\n return the training stats for the input data\n args:\n data - A TensorDict containing all the necessary data blocks.\n\n returns:\n loss - loss for the input data\n stats - a dict containing detailed losses\n \"\"\"\n raise NotImplementedError\n\n def to(self, device):\n \"\"\" Move the network to device\n args:\n device - device to use. 'cpu' or 'cuda'\n \"\"\"\n self.net.to(device)\n\n def train(self, mode=True):\n \"\"\" Set whether the network is in train mode.\n args:\n mode (True) - Bool specifying whether in training mode.\n \"\"\"\n self.net.train(mode)\n\n def eval(self):\n \"\"\" Set network to eval mode\"\"\"\n self.train(False)"
},
{
"identifier": "NestedTensor",
"path": "lib/utils/misc.py",
"snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)"
},
{
"identifier": "interpolate",
"path": "lib/utils/misc.py",
"snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if float(torchvision.__version__[:3]) < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)"
},
{
"identifier": "box_cxcywh_to_xyxy",
"path": "lib/utils/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\n x_c, y_c, w, h = x.unbind(-1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=-1)"
},
{
"identifier": "box_xywh_to_xyxy",
"path": "lib/utils/box_ops.py",
"snippet": "def box_xywh_to_xyxy(x):\n x1, y1, w, h = x.unbind(-1)\n b = [x1, y1, x1 + w, y1 + h]\n return torch.stack(b, dim=-1)"
},
{
"identifier": "merge_template_search",
"path": "lib/utils/merge.py",
"snippet": "def merge_template_search(inp_list, return_search=False, return_template=False):\n \"\"\"NOTICE: search region related features must be in the last place\"\"\"\n seq_dict = {\"feat\": torch.cat([x[\"feat\"] for x in inp_list], dim=0),\n \"mask\": torch.cat([x[\"mask\"] for x in inp_list], dim=1),\n \"pos\": torch.cat([x[\"pos\"] for x in inp_list], dim=0)}\n if return_search:\n x = inp_list[-1]\n seq_dict.update({\"feat_x\": x[\"feat\"], \"mask_x\": x[\"mask\"], \"pos_x\": x[\"pos\"]})\n if return_template:\n z = inp_list[0]\n seq_dict.update({\"feat_z\": z[\"feat\"], \"mask_z\": z[\"mask\"], \"pos_z\": z[\"pos\"]})\n return seq_dict"
},
{
"identifier": "generate_heatmap",
"path": "lib/utils/heapmap_utils.py",
"snippet": "def generate_heatmap(bboxes, patch_size=320, stride=16):\r\n \"\"\"\r\n Generate ground truth heatmap same as CenterNet\r\n Args:\r\n bboxes (torch.Tensor): shape of [num_search, bs, 4]\r\n\r\n Returns:\r\n gaussian_maps: list of generated heatmap\r\n\r\n \"\"\"\r\n gaussian_maps = []\r\n heatmap_size = patch_size // stride\r\n for single_patch_bboxes in bboxes:\r\n bs = single_patch_bboxes.shape[0]\r\n gt_scoremap = torch.zeros(bs, heatmap_size, heatmap_size)\r\n classes = torch.arange(bs).to(torch.long)\r\n bbox = single_patch_bboxes * heatmap_size\r\n wh = bbox[:, 2:]\r\n centers_int = (bbox[:, :2] + wh / 2).round()\r\n CenterNetHeatMap.generate_score_map(gt_scoremap, classes, wh, centers_int, 0.7)\r\n gaussian_maps.append(gt_scoremap.to(bbox.device))\r\n return gaussian_maps\r"
},
{
"identifier": "generate_mask_cond",
"path": "lib/utils/ce_utils.py",
"snippet": "def generate_mask_cond(cfg, bs, device, gt_bbox):\r\n template_size = cfg.DATA.TEMPLATE.SIZE\r\n stride = cfg.MODEL.BACKBONE.STRIDE\r\n template_feat_size = template_size // stride\r\n\r\n if cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'ALL':\r\n box_mask_z = None\r\n elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_POINT':\r\n if template_feat_size == 8:\r\n index = slice(3, 4)\r\n elif template_feat_size == 12:\r\n index = slice(5, 6)\r\n elif template_feat_size == 16:\r\n index = slice(7, 8)\r\n elif template_feat_size == 24:\r\n index = slice(11, 12)\r\n elif template_feat_size == 7:\r\n index = slice(3, 4)\r\n elif template_feat_size == 14:\r\n index = slice(6, 7)\r\n else:\r\n raise NotImplementedError\r\n box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n box_mask_z[:, index, index] = 1\r\n box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_REC':\r\n # use fixed 4x4 region, 3:5 for 8x8\r\n # use fixed 4x4 region 5:6 for 12x12\r\n if template_feat_size == 8:\r\n index = slice(3, 5)\r\n elif template_feat_size == 12:\r\n index = slice(5, 7)\r\n elif template_feat_size == 7:\r\n index = slice(3, 4)\r\n else:\r\n raise NotImplementedError\r\n box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n box_mask_z[:, index, index] = 1\r\n box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n\r\n elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'GT_BOX':\r\n box_mask_z = torch.zeros([bs, template_size, template_size], device=device)\r\n # box_mask_z_ori = data['template_seg'][0].view(-1, 1, *data['template_seg'].shape[2:]) # (batch, 1, 128, 128)\r\n box_mask_z = generate_bbox_mask(box_mask_z, gt_bbox * template_size).unsqueeze(1).to(\r\n torch.float) # (batch, 1, 128, 128)\r\n # box_mask_z_vis = box_mask_z.cpu().numpy()\r\n box_mask_z = F.interpolate(box_mask_z, scale_factor=1. / cfg.MODEL.BACKBONE.STRIDE, mode='bilinear',\r\n align_corners=False)\r\n box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n # box_mask_z_vis = box_mask_z[:, 0, ...].cpu().numpy()\r\n # gaussian_maps_vis = generate_heatmap(data['template_anno'], self.cfg.DATA.TEMPLATE.SIZE, self.cfg.MODEL.STRIDE)[0].cpu().numpy()\r\n else:\r\n raise NotImplementedError\r\n\r\n return box_mask_z\r"
},
{
"identifier": "adjust_keep_rate",
"path": "lib/utils/ce_utils.py",
"snippet": "def adjust_keep_rate(epoch, warmup_epochs, total_epochs, ITERS_PER_EPOCH, base_keep_rate=0.5, max_keep_rate=1, iters=-1):\r\n if epoch < warmup_epochs:\r\n return 1\r\n if epoch >= total_epochs:\r\n return base_keep_rate\r\n if iters == -1:\r\n iters = epoch * ITERS_PER_EPOCH\r\n total_iters = ITERS_PER_EPOCH * (total_epochs - warmup_epochs)\r\n iters = iters - ITERS_PER_EPOCH * warmup_epochs\r\n keep_rate = base_keep_rate + (max_keep_rate - base_keep_rate) \\\r\n * (math.cos(iters / total_iters * math.pi) + 1) * 0.5\r\n\r\n return keep_rate\r"
}
] | from . import BaseActor
from lib.utils.misc import NestedTensor, interpolate
from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy
from lib.utils.merge import merge_template_search
from ...utils.heapmap_utils import generate_heatmap
from ...utils.ce_utils import generate_mask_cond, adjust_keep_rate
import torch | 3,251 |
class ODTrackActor(BaseActor):
""" Actor for training ODTrack models """
def __init__(self, net, objective, loss_weight, settings, cfg=None):
super().__init__(net, objective)
self.loss_weight = loss_weight
self.settings = settings
self.bs = self.settings.batchsize # batch size
self.cfg = cfg
def __call__(self, data):
"""
args:
data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.
template_images: (N_t, batch, 3, H, W)
search_images: (N_s, batch, 3, H, W)
returns:
loss - the training loss
status - dict containing detailed losses
"""
# forward pass
out_dict = self.forward_pass(data)
# compute losses
loss, status = self.compute_losses(out_dict, data)
return loss, status
def forward_pass(self, data):
template_list = []
search_list = []
for i in range(self.settings.num_template):
template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128)
# template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128)
template_list.append(template_img_i)
for i in range(self.settings.num_search):
search_img_i = data['search_images'][i].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320)
# search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320)
search_list.append(search_img_i)
box_mask_z = []
ce_keep_rate = None
if self.cfg.MODEL.BACKBONE.CE_LOC:
for i in range(self.settings.num_template):
box_mask_z.append(generate_mask_cond(self.cfg, template_list[i].shape[0], template_list[i].device,
data['template_anno'][i]))
box_mask_z = torch.cat(box_mask_z, dim=1)
ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH
ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH
ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,
total_epochs=ce_start_epoch + ce_warm_epoch,
ITERS_PER_EPOCH=1,
base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])
# if len(template_list) == 1:
# template_list = template_list[0]
out_dict = self.net(template=template_list,
search=search_list,
ce_template_mask=box_mask_z,
ce_keep_rate=ce_keep_rate,
return_last_attn=False)
return out_dict
def compute_losses(self, pred_dict, gt_dict, return_status=True):
        # currently only supports pred_dict of type list
assert isinstance(pred_dict, list)
loss_dict = {}
total_status = {}
        total_loss = torch.tensor(0., dtype=torch.float).cuda()   # define a zero tensor and place it on the GPU
# generate gt gaussian map
gt_gaussian_maps_list = generate_heatmap(gt_dict['search_anno'], self.cfg.DATA.SEARCH.SIZE, self.cfg.MODEL.BACKBONE.STRIDE)
for i in range(len(pred_dict)):
# get GT
gt_bbox = gt_dict['search_anno'][i] # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4)
gt_gaussian_maps = gt_gaussian_maps_list[i].unsqueeze(1)
# Get boxes
pred_boxes = pred_dict[i]['pred_boxes']
if torch.isnan(pred_boxes).any():
raise ValueError("Network outputs is NAN! Stop Training")
num_queries = pred_boxes.size(1)
|
class ODTrackActor(BaseActor):
""" Actor for training ODTrack models """
def __init__(self, net, objective, loss_weight, settings, cfg=None):
super().__init__(net, objective)
self.loss_weight = loss_weight
self.settings = settings
self.bs = self.settings.batchsize # batch size
self.cfg = cfg
def __call__(self, data):
"""
args:
data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.
template_images: (N_t, batch, 3, H, W)
search_images: (N_s, batch, 3, H, W)
returns:
loss - the training loss
status - dict containing detailed losses
"""
# forward pass
out_dict = self.forward_pass(data)
# compute losses
loss, status = self.compute_losses(out_dict, data)
return loss, status
def forward_pass(self, data):
template_list = []
search_list = []
for i in range(self.settings.num_template):
template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128)
# template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128)
template_list.append(template_img_i)
for i in range(self.settings.num_search):
search_img_i = data['search_images'][i].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320)
# search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320)
search_list.append(search_img_i)
box_mask_z = []
ce_keep_rate = None
if self.cfg.MODEL.BACKBONE.CE_LOC:
for i in range(self.settings.num_template):
box_mask_z.append(generate_mask_cond(self.cfg, template_list[i].shape[0], template_list[i].device,
data['template_anno'][i]))
box_mask_z = torch.cat(box_mask_z, dim=1)
ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH
ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH
ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,
total_epochs=ce_start_epoch + ce_warm_epoch,
ITERS_PER_EPOCH=1,
base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])
# if len(template_list) == 1:
# template_list = template_list[0]
out_dict = self.net(template=template_list,
search=search_list,
ce_template_mask=box_mask_z,
ce_keep_rate=ce_keep_rate,
return_last_attn=False)
return out_dict
def compute_losses(self, pred_dict, gt_dict, return_status=True):
        # currently only supports pred_dict of type list
assert isinstance(pred_dict, list)
loss_dict = {}
total_status = {}
        total_loss = torch.tensor(0., dtype=torch.float).cuda()   # define a zero tensor and place it on the GPU
# generate gt gaussian map
gt_gaussian_maps_list = generate_heatmap(gt_dict['search_anno'], self.cfg.DATA.SEARCH.SIZE, self.cfg.MODEL.BACKBONE.STRIDE)
for i in range(len(pred_dict)):
# get GT
gt_bbox = gt_dict['search_anno'][i] # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4)
gt_gaussian_maps = gt_gaussian_maps_list[i].unsqueeze(1)
# Get boxes
pred_boxes = pred_dict[i]['pred_boxes']
if torch.isnan(pred_boxes).any():
raise ValueError("Network outputs is NAN! Stop Training")
num_queries = pred_boxes.size(1) | pred_boxes_vec = box_cxcywh_to_xyxy(pred_boxes).view(-1, 4) # (B,N,4) --> (BN,4) (x1,y1,x2,y2) | 3 | 2023-12-10 03:57:19+00:00 | 4k |
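The next_line field shows the continuation converting the predicted boxes from (cx, cy, w, h) to corner format; the ground-truth boxes are typically converted from (x, y, w, h) the same way before the box losses are computed. A sketch of that step, hedged as the usual pattern rather than the verified source (the gt_boxes_vec line is an assumption):

            # convert predictions and ground truth to (x1, y1, x2, y2) for the box losses (sketch)
            pred_boxes_vec = box_cxcywh_to_xyxy(pred_boxes).view(-1, 4)  # (B, N, 4) -> (B*N, 4)
            gt_boxes_vec = box_xywh_to_xyxy(gt_bbox)[:, None, :].repeat((1, num_queries, 1)).view(-1, 4).clamp(min=0.0, max=1.0)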
lumina-test/lumina | lumina/analyzer/main.py | [
{
"identifier": "SwitchCounter",
"path": "lumina/analyzer/counter/switch_counter.py",
"snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter"
},
{
"identifier": "MLNXHostCounter",
"path": "lumina/analyzer/counter/host_counter.py",
"snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx"
},
{
"identifier": "IntelHostCounter",
"path": "lumina/analyzer/counter/host_counter.py",
"snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict"
},
{
"identifier": "get_packet_list",
"path": "lumina/analyzer/pcap_processor/pcap_process.py",
"snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list"
},
{
"identifier": "config_stream_handler",
"path": "lumina/utils/config_loggers.py",
"snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)"
},
{
"identifier": "config_file_handler",
"path": "lumina/utils/config_loggers.py",
"snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)"
}
] | import argparse, sys, yaml, os, math, logging
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.analyzer.checker.cnp_check as cnp_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.utils.config_loggers import config_stream_handler, config_file_handler | 3,271 | """
This is the main entry point for the offline analyzer. It takes a config file as input and
performs the following tasks:
1. Check the integrity of the trace based on pcap files and timestamps
2. Check the host counters
3. Check the traces and counters according to Go-Back-N (GBN) and Congestion Notification Packet (CNP) checkers
"""
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "analysis.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.out"
def get_qp_info_list(switch_msg_snapshot):
""" Get the list of QP info from the switch message snapshot
Args:
switch_msg_snapshot (str): The path to the switch message snapshot
Returns:
list of dict: The list of queue pair (QP) information if successful or None otherwise.
The list of QP information is in the following format:
[{'psn_rcv': initial packet sequence number from the receiver qp,
'psn_snd': initial packet sequence number from the sender qp,
'qpn_rcv': receiver qp number,
'qpn_snd': sender qp number,
'ip_rcv' : receiver IP
'ip_snd' : sender IP}]
"""
try:
with open(switch_msg_snapshot, 'r') as stream:
qp_info_list = yaml.safe_load(stream)
except:
logging.error("Read switch message snapshot %s error." % switch_msg_snapshot)
return None
logging.info("Read switch message snapshot %s." % switch_msg_snapshot)
return qp_info_list
def main(args):
""" Main function of the offline analyzer
Args:
args (argparser.Namespace): The parsed arguments
Returns:
N/A
"""
with open(args.config_file, "r") as stream:
conf = yaml.safe_load(stream)
try:
result_dir = conf['result-path']
num_repeats = conf['num-repeats']
mtu = conf['traffic']['mtu']
msg_size = conf['traffic']['message-size']
num_msgs_per_qp = conf['traffic']['num-msgs-per-qp']
port_map = {'requester': conf['requester']['nic']['switch-port'],
'responder': conf['responder']['nic']['switch-port'],
'requester-mirror': conf['requester-mirror']['nic']['switch-port'],
'responder-mirror': conf['responder-mirror']['nic']['switch-port']}
requester_nic_type = conf['requester']['nic']['type']
responder_nic_type = conf['responder']['nic']['type']
requester_nic_vendor = host.NIC_TYPE2VENDOR_MAP[requester_nic_type] \
if requester_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
responder_nic_vendor = host.NIC_TYPE2VENDOR_MAP[responder_nic_type] \
if responder_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
nic_vendor_map = {'requester': requester_nic_vendor, 'responder': responder_nic_vendor}
except KeyError as e:
print("Config file %s has a bad yaml format (key error: %s)" % (args.config_file, e))
sys.exit(1)
root_logger = logging.getLogger()
root_logger.handlers.clear()
| """
This is the main entry point for the offline analyzer. It takes a config file as input and
performs the following tasks:
1. Check the integrity of the trace based on pcap files and timestamps
2. Check the host counters
3. Check the traces and counters according to Go-Back-N (GBN) and Congestion Notification Packet (CNP) checkers
"""
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "analysis.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.out"
def get_qp_info_list(switch_msg_snapshot):
""" Get the list of QP info from the switch message snapshot
Args:
switch_msg_snapshot (str): The path to the switch message snapshot
Returns:
list of dict: The list of queue pair (QP) information if successful or None otherwise.
The list of QP information is in the following format:
[{'psn_rcv': initial packet sequence number from the receiver qp,
'psn_snd': initial packet sequence number from the sender qp,
'qpn_rcv': receiver qp number,
'qpn_snd': sender qp number,
'ip_rcv' : receiver IP
'ip_snd' : sender IP}]
"""
try:
with open(switch_msg_snapshot, 'r') as stream:
qp_info_list = yaml.safe_load(stream)
except:
logging.error("Read switch message snapshot %s error." % switch_msg_snapshot)
return None
logging.info("Read switch message snapshot %s." % switch_msg_snapshot)
return qp_info_list
def main(args):
""" Main function of the offline analyzer
Args:
args (argparser.Namespace): The parsed arguments
Returns:
N/A
"""
with open(args.config_file, "r") as stream:
conf = yaml.safe_load(stream)
try:
result_dir = conf['result-path']
num_repeats = conf['num-repeats']
mtu = conf['traffic']['mtu']
msg_size = conf['traffic']['message-size']
num_msgs_per_qp = conf['traffic']['num-msgs-per-qp']
port_map = {'requester': conf['requester']['nic']['switch-port'],
'responder': conf['responder']['nic']['switch-port'],
'requester-mirror': conf['requester-mirror']['nic']['switch-port'],
'responder-mirror': conf['responder-mirror']['nic']['switch-port']}
requester_nic_type = conf['requester']['nic']['type']
responder_nic_type = conf['responder']['nic']['type']
requester_nic_vendor = host.NIC_TYPE2VENDOR_MAP[requester_nic_type] \
if requester_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
responder_nic_vendor = host.NIC_TYPE2VENDOR_MAP[responder_nic_type] \
if responder_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
nic_vendor_map = {'requester': requester_nic_vendor, 'responder': responder_nic_vendor}
except KeyError as e:
print("Config file %s has a bad yaml format (key error: %s)" % (args.config_file, e))
sys.exit(1)
root_logger = logging.getLogger()
root_logger.handlers.clear() | config_stream_handler(root_logger) | 4 | 2023-12-09 08:21:14+00:00 | 4k |
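After clearing the existing handlers, the logging setup continues by attaching the two helpers from the snippets to the root logger. A minimal sketch; the first line is confirmed by the record, and the log file path built from result_dir and LOG_FILENAME is an assumption:

    # attach console and file handlers to the root logger (sketch; file path is an assumption)
    config_stream_handler(root_logger)
    config_file_handler(root_logger, os.path.join(result_dir, LOG_FILENAME))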
yilin-bao/nnanim | TestingCode/vit.py | [
{
"identifier": "EmbeddingStem",
"path": "TestingCode/patch_embed.py",
"snippet": "class EmbeddingStem(nn.Module):\n def __init__(\n self,\n image_size=224,\n patch_size=16,\n channels=3,\n embedding_dim=768,\n hidden_dims=None,\n conv_patch=False,\n linear_patch=False,\n conv_stem=True,\n conv_stem_original=True,\n conv_stem_scaled_relu=False,\n position_embedding_dropout=None,\n cls_head=True,\n ):\n super(EmbeddingStem, self).__init__()\n\n assert (\n sum([conv_patch, conv_stem, linear_patch]) == 1\n ), \"Only one of three modes should be active\"\n\n image_height, image_width = pair(image_size)\n patch_height, patch_width = pair(patch_size)\n\n assert (\n image_height % patch_height == 0 and image_width % patch_width == 0\n ), \"Image dimensions must be divisible by the patch size.\"\n\n assert not (\n conv_stem and cls_head\n ), \"Cannot use [CLS] token approach with full conv stems for ViT\"\n\n if linear_patch or conv_patch:\n self.grid_size = (\n image_height // patch_height,\n image_width // patch_width,\n )\n num_patches = self.grid_size[0] * self.grid_size[1]\n\n if cls_head:\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embedding_dim))\n num_patches += 1\n\n # positional embedding\n self.pos_embed = nn.Parameter(\n torch.zeros(1, num_patches, embedding_dim)\n )\n self.pos_drop = nn.Dropout(p=position_embedding_dropout)\n\n if conv_patch:\n self.projection = nn.Sequential(\n nn.Conv2d(\n channels,\n embedding_dim,\n kernel_size=patch_size,\n stride=patch_size,\n ),\n )\n elif linear_patch:\n patch_dim = channels * patch_height * patch_width\n self.projection = nn.Sequential(\n Rearrange(\n 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)',\n p1=patch_height,\n p2=patch_width,\n ),\n nn.Linear(patch_dim, embedding_dim),\n )\n elif conv_stem:\n assert (\n conv_stem_scaled_relu ^ conv_stem_original\n ), \"Can use either the original or the scaled relu stem\"\n\n if not isinstance(hidden_dims, list):\n raise ValueError(\"Cannot create stem without list of sizes\")\n\n if conv_stem_original:\n \"\"\"\n Conv stem from https://arxiv.org/pdf/2106.14881.pdf\n \"\"\"\n\n hidden_dims.insert(0, channels)\n modules = []\n for i, (in_ch, out_ch) in enumerate(\n zip(hidden_dims[:-1], hidden_dims[1:])\n ):\n modules.append(\n nn.Conv2d(\n in_ch,\n out_ch,\n kernel_size=3,\n stride=2 if in_ch != out_ch else 1,\n padding=1,\n bias=False,\n ),\n )\n modules.append(nn.BatchNorm2d(out_ch),)\n modules.append(nn.ReLU(inplace=True))\n\n modules.append(\n nn.Conv2d(\n hidden_dims[-1], embedding_dim, kernel_size=1, stride=1,\n ),\n )\n self.projection = nn.Sequential(*modules)\n\n elif conv_stem_scaled_relu:\n \"\"\"\n Conv stem from https://arxiv.org/pdf/2109.03810.pdf\n \"\"\"\n assert (\n len(hidden_dims) == 1\n ), \"Only one value for hidden_dim is allowed\"\n mid_ch = hidden_dims[0]\n\n # fmt: off\n self.projection = nn.Sequential(\n nn.Conv2d(\n channels, mid_ch,\n kernel_size=7, stride=2, padding=3, bias=False,\n ),\n nn.BatchNorm2d(mid_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(\n mid_ch, mid_ch,\n kernel_size=3, stride=1, padding=1, bias=False,\n ),\n nn.BatchNorm2d(mid_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(\n mid_ch, mid_ch,\n kernel_size=3, stride=1, padding=1, bias=False,\n ),\n nn.BatchNorm2d(mid_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(\n mid_ch, embedding_dim,\n kernel_size=patch_size // 2, stride=patch_size // 2,\n ),\n )\n # fmt: on\n\n else:\n raise ValueError(\"Undefined convolutional stem type defined\")\n\n self.conv_stem = conv_stem\n self.conv_patch = conv_patch\n self.linear_patch = linear_patch\n self.cls_head = cls_head\n\n self._init_weights()\n\n 
def _init_weights(self):\n if not self.conv_stem:\n trunc_normal_(self.pos_embed, std=0.02)\n\n def forward(self, x):\n if self.conv_stem:\n x = self.projection(x)\n x = x.flatten(2).transpose(1, 2)\n return x\n\n # paths for cls_token / position embedding\n elif self.linear_patch:\n x = self.projection(x)\n elif self.conv_patch:\n x = self.projection(x)\n x = x.flatten(2).transpose(1, 2)\n\n if self.cls_head:\n cls_token = self.cls_token.expand(x.shape[0], -1, -1)\n x = torch.cat((cls_token, x), dim=1)\n return self.pos_drop(x + self.pos_embed)"
},
{
"identifier": "Transformer",
"path": "TestingCode/transformer.py",
"snippet": "class Transformer(nn.Module):\n def __init__(\n self,\n dim,\n depth,\n heads,\n mlp_ratio=4.0,\n attn_dropout=0.0,\n dropout=0.0,\n qkv_bias=True,\n revised=False,\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n assert isinstance(\n mlp_ratio, float\n ), \"MLP ratio should be an integer for valid \"\n mlp_dim = int(mlp_ratio * dim)\n\n for _ in range(depth):\n self.layers.append(\n nn.ModuleList(\n [\n PreNorm(\n dim,\n Attention(\n dim,\n num_heads=heads,\n qkv_bias=qkv_bias,\n attn_drop=attn_dropout,\n proj_drop=dropout,\n ),\n ),\n PreNorm(\n dim,\n FeedForward(dim, mlp_dim, dropout_rate=dropout,),\n )\n if not revised\n else FeedForward(\n dim, mlp_dim, dropout_rate=dropout, revised=True,\n ),\n ]\n )\n )\n\n def forward(self, x):\n for attn, ff in self.layers:\n x = attn(x) + x\n x = ff(x) + x\n return x"
},
{
"identifier": "OutputLayer",
"path": "TestingCode/modules.py",
"snippet": "class OutputLayer(nn.Module):\n def __init__(\n self,\n embedding_dim,\n num_classes=1000,\n representation_size=None,\n cls_head=False,\n ):\n super(OutputLayer, self).__init__()\n\n self.num_classes = num_classes\n modules = []\n if representation_size:\n modules.append(nn.Linear(embedding_dim, representation_size))\n modules.append(nn.Tanh())\n modules.append(nn.Linear(representation_size, num_classes))\n else:\n modules.append(nn.Linear(embedding_dim, num_classes))\n\n self.net = nn.Sequential(*modules)\n\n if cls_head:\n self.to_cls_token = nn.Identity()\n\n self.cls_head = cls_head\n self.num_classes = num_classes\n self._init_weights()\n\n def _init_weights(self):\n for name, module in self.net.named_children():\n if isinstance(module, nn.Linear):\n if module.weight.shape[0] == self.num_classes:\n nn.init.zeros_(module.weight)\n nn.init.zeros_(module.bias)\n\n def forward(self, x):\n if self.cls_head:\n x = self.to_cls_token(x[:, 0])\n else:\n \"\"\"\n Scaling Vision Transformer: https://arxiv.org/abs/2106.04560\n \"\"\"\n x = torch.mean(x, dim=1)\n\n return self.net(x)"
}
] | import torch.nn as nn
from TestingCode.patch_embed import EmbeddingStem
from TestingCode.transformer import Transformer
from TestingCode.modules import OutputLayer | 2,458 |
class VisionTransformer(nn.Module):
def __init__(
self,
image_size=224,
patch_size=16,
in_channels=3,
embedding_dim=768,
num_layers=12,
num_heads=12,
qkv_bias=True,
mlp_ratio=4.0,
use_revised_ffn=False,
dropout_rate=0.0,
attn_dropout_rate=0.0,
use_conv_stem=True,
use_conv_patch=False,
use_linear_patch=False,
use_conv_stem_original=True,
use_stem_scaled_relu=False,
hidden_dims=None,
cls_head=False,
num_classes=1000,
representation_size=None,
):
super(VisionTransformer, self).__init__()
# embedding layer
self.embedding_layer = EmbeddingStem(
image_size=image_size,
patch_size=patch_size,
channels=in_channels,
embedding_dim=embedding_dim,
hidden_dims=hidden_dims,
conv_patch=use_conv_patch,
linear_patch=use_linear_patch,
conv_stem=use_conv_stem,
conv_stem_original=use_conv_stem_original,
conv_stem_scaled_relu=use_stem_scaled_relu,
position_embedding_dropout=dropout_rate,
cls_head=cls_head,
)
# transformer
self.transformer = Transformer(
dim=embedding_dim,
depth=num_layers,
heads=num_heads,
mlp_ratio=mlp_ratio,
attn_dropout=attn_dropout_rate,
dropout=dropout_rate,
qkv_bias=qkv_bias,
revised=use_revised_ffn,
)
self.post_transformer_ln = nn.LayerNorm(embedding_dim)
# output layer
|
class VisionTransformer(nn.Module):
def __init__(
self,
image_size=224,
patch_size=16,
in_channels=3,
embedding_dim=768,
num_layers=12,
num_heads=12,
qkv_bias=True,
mlp_ratio=4.0,
use_revised_ffn=False,
dropout_rate=0.0,
attn_dropout_rate=0.0,
use_conv_stem=True,
use_conv_patch=False,
use_linear_patch=False,
use_conv_stem_original=True,
use_stem_scaled_relu=False,
hidden_dims=None,
cls_head=False,
num_classes=1000,
representation_size=None,
):
super(VisionTransformer, self).__init__()
# embedding layer
self.embedding_layer = EmbeddingStem(
image_size=image_size,
patch_size=patch_size,
channels=in_channels,
embedding_dim=embedding_dim,
hidden_dims=hidden_dims,
conv_patch=use_conv_patch,
linear_patch=use_linear_patch,
conv_stem=use_conv_stem,
conv_stem_original=use_conv_stem_original,
conv_stem_scaled_relu=use_stem_scaled_relu,
position_embedding_dropout=dropout_rate,
cls_head=cls_head,
)
# transformer
self.transformer = Transformer(
dim=embedding_dim,
depth=num_layers,
heads=num_heads,
mlp_ratio=mlp_ratio,
attn_dropout=attn_dropout_rate,
dropout=dropout_rate,
qkv_bias=qkv_bias,
revised=use_revised_ffn,
)
self.post_transformer_ln = nn.LayerNorm(embedding_dim)
# output layer | self.cls_layer = OutputLayer( | 2 | 2023-12-05 22:01:06+00:00 | 4k |
equilibration/equipy | equipy/fairness/_wasserstein.py | [
{
"identifier": "_check_epsilon",
"path": "equipy/utils/checkers.py",
"snippet": "def _check_epsilon(epsilon):\n \"\"\"\n Check if epsilon (fairness parameter) is within the valid range [0, 1].\n\n Parameters\n ----------\n epsilon : float\n Fairness parameter controlling the trade-off between fairness and accuracy.\n\n Raises\n ------\n ValueError\n If epsilon is outside the valid range [0, 1].\n \"\"\"\n if epsilon < 0 or epsilon > 1:\n raise ValueError(\n 'epsilon must be between 0 and 1')"
},
{
"identifier": "_check_epsilon_size",
"path": "equipy/utils/checkers.py",
"snippet": "def _check_epsilon_size(epsilon, sensitive_features):\n \"\"\"\n Check if the epsilon list matches the number of sensitive features.\n\n Parameters\n ----------\n epsilon : list, shape (n_sensitive_features,)\n Fairness parameters controlling the trade-off between fairness and accuracy for each sensitive feature.\n\n sensitive_features : array-like, shape (n_samples, n_sensitive_features)\n Test samples representing multiple sensitive attributes.\n\n Raises\n ------\n ValueError\n If the length of epsilon does not match the number of sensitive features.\n \"\"\"\n\n if sensitive_features.ndim == 1:\n if len(epsilon) != 1:\n raise ValueError(\n 'epsilon must have the same length than the number of sensitive features')\n else:\n if len(epsilon) != np.shape(sensitive_features)[1]:\n raise ValueError(\n 'epsilon must have the same length than the number of sensitive features')"
},
{
"identifier": "_check_mod",
"path": "equipy/utils/checkers.py",
"snippet": "def _check_mod(modalities_calib, modalities_test):\n \"\"\"\n Check if modalities in test data are included in calibration data's modalities.\n\n Parameters\n ----------\n modalities_calib : list\n Modalities from the calibration data.\n modalities_test : list\n Modalities from the test data.\n\n Raises\n ------\n ValueError\n If modalities in test data are not present in calibration data.\n \"\"\"\n missing_modalities = set(modalities_test) - set(modalities_calib)\n if len(missing_modalities) != 0:\n raise ValueError(\n f\"The following modalities of the test sensitive features are not in modalities of the calibration sensitive features: {missing_modalities}\")"
},
{
"identifier": "_check_shape",
"path": "equipy/utils/checkers.py",
"snippet": "def _check_shape(y, sensitive_feature):\n \"\"\"\n Check the shape and data types of input arrays y and sensitive_feature.\n\n Parameters\n ----------\n y : array-like\n Target values of the data.\n sensitive_feature : array-like\n Input samples representing the sensitive attribute.\n\n Raises\n ------\n ValueError\n If the input arrays have incorrect shapes or data types.\n \"\"\"\n if not isinstance(sensitive_feature, np.ndarray):\n raise ValueError('sensitive_features must be an array')\n\n if not isinstance(y, np.ndarray):\n raise ValueError('y must be an array')\n\n if len(sensitive_feature) != len(y):\n raise ValueError(\n 'sensitive_features and y should have the same length')\n\n if len(np.unique(sensitive_feature)) == 1:\n raise ValueError(\n \"At least one of your sensitive attributes contains only one modality and so it is already fair. Remove it from your sensitive features.\")\n\n if not (np.issubdtype(y.dtype, np.floating) or np.issubdtype(y.dtype, np.integer)):\n raise ValueError('y should contain only float or integer numbers')"
},
{
"identifier": "_check_nb_observations",
"path": "equipy/utils/checkers.py",
"snippet": "def _check_nb_observations(sensitive_features):\n if sensitive_features.ndim == 1 & len(sensitive_features) == 1:\n raise ValueError(\"Fairness can't be applied on a single observation\")\n if sensitive_features.ndim == 2 & np.shape(sensitive_features)[1] == 1:\n raise ValueError(\"Fairness can't be applied on a single observation\")"
},
{
"identifier": "BaseHelper",
"path": "equipy/fairness/_base.py",
"snippet": "class BaseHelper():\n \"\"\"\n Base class providing helper methods for Wasserstein distance-based fairness adjustment.\n\n Attributes\n ----------\n ecdf : dict\n Dictionary storing ECDF (Empirical Cumulative Distribution Function) objects for each sensitive modality.\n eqf : dict\n Dictionary storing EQF (Empirical Quantile Function) objects for each sensitive modality.\n\n Methods\n -------\n _get_modalities(sensitive_feature)\n Get unique modalities from the input sensitive attribute array.\n _get_location_modalities(sensitive_feature)\n Get the indices of occurrences for each modality in the input sensitive attribute array.\n _get_weights(sensitive_feature)\n Calculate weights (probabilities) for each modality based on their occurrences.\n _estimate_ecdf_eqf(y, sensitive_feature, sigma)\n Estimate ECDF and EQF for each modality, incorporating random noise within [-sigma, sigma].\n _get_correction(self, weights, mod, y_with_noise, location_modalities, modalities_test)\n Calculate correction of y.\n _fair_y_values(self, y, modalities_test, location_modalities, weights)\n Apply fairness correction to input values.\n\n Notes\n -----\n This base class provides essential methods for Wasserstein distance-based fairness adjustment. It includes\n methods for modality extraction, localization of modalities in the input data, weight calculation, and ECDF/EQF \n estimation with random noise.\n \"\"\"\n\n def __init__(self):\n self.ecdf = {}\n self.eqf = {}\n\n self.weights = {}\n\n def _get_modalities(self, sensitive_feature):\n \"\"\"\n Get unique modalities from the input sensitive attribute array.\n\n Parameters\n ----------\n sensitive_feature : array-like, shape (n_samples,)\n Input samples representing the sensitive attributes.\n\n Returns\n -------\n list\n List of unique modalities present in the input sensitive attribute array.\n \"\"\"\n return set(sensitive_feature)\n\n def _get_location_modalities(self, sensitive_feature):\n \"\"\"\n Get the indices of occurrences for each modality in the input sensitive attribute array.\n\n Parameters\n ----------\n sensitive_feature : array-like, shape (n_samples,)\n Input sample representing the sensitive attribute.\n\n Returns\n -------\n dict\n Dictionary where keys are modalities and values are arrays containing their indices.\n \"\"\"\n location_modalities = {}\n for modality in self._get_modalities(sensitive_feature):\n location_modalities[modality] = np.where(\n sensitive_feature == modality)[0]\n return location_modalities\n\n def _compute_weights(self, sensitive_feature):\n \"\"\"\n Calculate weights (probabilities) for each modality based on their occurrences.\n\n Parameters\n ----------\n sensitive_feature : array-like, shape (n_samples,)\n Input samples representing the sensitive attribute.\n\n Returns\n -------\n dict\n Dictionary where keys are modalities and values are their corresponding weights.\n \"\"\"\n location_modalities = self._get_location_modalities(sensitive_feature)\n for modality in self._get_modalities(sensitive_feature):\n self.weights[modality] = len(\n location_modalities[modality])/len(sensitive_feature)\n\n def _estimate_ecdf_eqf(self, y, sensitive_feature, sigma):\n \"\"\"\n Estimate ECDF and EQF for each modality, incorporating random noise within [-sigma, sigma].\n\n Parameters\n ----------\n y : array-like, shape (n_samples,)\n Target values corresponding to the sensitive attribute array.\n sensitive_feature : array-like, shape (n_samples,)\n Input samples representing the sensitive attribute.\n sigma 
: float\n Standard deviation of the random noise added to the data.\n\n Returns\n -------\n None\n \"\"\"\n location_modalities = self._get_location_modalities(sensitive_feature)\n eps = np.random.uniform(-sigma, sigma, len(y))\n for modality in self._get_modalities(sensitive_feature):\n self.ecdf[modality] = ECDF(y[location_modalities[modality]] +\n eps[location_modalities[modality]])\n self.eqf[modality] = EQF(\n y[location_modalities[modality]]+eps[location_modalities[modality]])\n\n def _get_correction(self, mod, y_with_noise, location_modalities, modalities_test):\n \"\"\"\n Calculate correction of y.\n\n Parameters\n ----------\n weights : dict\n A dictionary of weights for each modality.\n mod : str\n The current modality for which the correction is calculated.\n y_with_noise : np.ndarray\n y plus a random noise.\n location_modalities : dict\n A dictionary mapping modalities to their locations.\n modalities_test : set\n Set of modalities for which correction is calculated.\n\n Returns\n -------\n float\n The correction value.\n \"\"\"\n correction = 0\n for _mod in modalities_test:\n correction += self.weights[_mod] * self.eqf[_mod](\n self.ecdf[mod](y_with_noise[location_modalities[mod]]))\n return correction\n\n def _fair_y_values(self, y, sensitive_feature, modalities_test):\n \"\"\"\n Apply fairness correction to input values.\n\n Parameters\n ----------\n y : np.ndarray\n Input values.\n sensitive_features : np.ndarray, shape (n_samples, n_sensitive_features)\n The test samples representing multiple sensitive attributes.\n modalities_test : list\n List of modalities for correction.\n weights : dict\n A dictionary of weights for each modality.\n\n Returns\n -------\n np.ndarray\n Fair values after applying correction.\n \"\"\"\n location_modalities = self._get_location_modalities(sensitive_feature)\n y_fair = np.zeros_like(y)\n eps = np.random.uniform(-self.sigma, self.sigma, len(y))\n y_with_noise = y + eps\n for mod in modalities_test:\n y_fair[location_modalities[mod]] += self._get_correction(\n mod, y_with_noise, location_modalities, modalities_test)\n return y_fair"
}
] | import numpy as np
from ..utils.checkers import _check_epsilon, _check_epsilon_size, _check_mod, _check_shape, _check_nb_observations
from ._base import BaseHelper | 3,486 |
class FairWasserstein(BaseHelper):
"""
Class implementing Wasserstein distance-based fairness adjustment for binary classification tasks.
Parameters
----------
sigma : float, optional (default=0.0001)
Standard deviation of the random noise added during fairness adjustment.
Attributes
----------
sigma : float
Standard deviation of the random noise added during fairness adjustment.
modalities_calib : dict
Dictionary storing modality values obtained from calibration data.
weights : dict
Dictionary storing weights (probabilities) for each modality based on their occurrences in calibration data.
ecdf : dict
Dictionary storing ECDF (Empirical Cumulative Distribution Function) objects for each sensitive modality.
eqf : dict
Dictionary storing EQF (Empirical Quantile Function) objects for each sensitive modality.
Methods
-------
fit(y, sensitive_feature)
Fit the fairness adjustment model using calibration data.
transform(y, sensitive_feature, epsilon=0)
Transform test data to enforce fairness using Wasserstein distance.
"""
def __init__(self, sigma=0.0001):
super().__init__()
self.sigma = sigma
self.modalities_calib = None
def fit(self, y, sensitive_feature):
"""
Perform fit on the calibration data and save the ECDF, EQF, and weights of the sensitive variable.
Parameters
----------
y : array-like, shape (n_samples,)
The calibration labels.
sensitive_feature : array-like, shape (n_samples,)
The calibration samples representing one single sensitive attribute.
Returns
-------
None
Notes
-----
This method computes the ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights for the sensitive variable
based on the provided calibration data. These computed values are used
during the transformation process to ensure fairness in predictions.
Examples
--------
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> y = np.array([0.0, 1.0, 1.0, 0.0])
>>> sensitive_feature = np.array([1, 2, 0, 2])
>>> wasserstein.fit(y, sensitive_feature)
"""
_check_shape(y, sensitive_feature)
self.modalities_calib = self._get_modalities(sensitive_feature)
self._compute_weights(sensitive_feature)
self._estimate_ecdf_eqf(y, sensitive_feature, self.sigma)
def transform(self, y, sensitive_feature, epsilon=0):
"""
Transform the test data to enforce fairness using Wasserstein distance.
Parameters
----------
y : array-like, shape (n_samples,)
The target values of the test data.
sensitive_feature : array-like, shape (n_samples,)
The test samples representing a single sensitive attribute.
epsilon : float, optional (default=0)
The fairness parameter controlling the trade-off between fairness and accuracy.
It represents the fraction of the original predictions retained after fairness adjustment.
Epsilon should be a value between 0 and 1, where 0 means full fairness and 1 means no fairness constraint.
Returns
-------
y_fair : array-like, shape (n_samples,)
Fair predictions for the test data after enforcing fairness constraints.
Notes
-----
This method applies Wasserstein distance-based fairness adjustment to the test data
using the precomputed ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights obtained from the calibration data.
Random noise within the range of [-sigma, sigma] is added to the test data to ensure fairness.
The parameter epsilon controls the trade-off between fairness and accuracy,
with 0 enforcing full fairness and 1 retaining the original predictions.
Examples
--------
>>> y = np.array([0.05, 0.08, 0.9, 0.9, 0.01, 0.88])
>>> sensitive_feature = np.array([1, 3, 2, 3, 1, 2])
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> wasserstein.fit(y, sensitive_feature)
>>> y = np.array([0.01, 0.99, 0.98, 0.04])
>>> sensitive_feature = np.array([3, 1, 2, 3])
>>> print(wasserstein.transform(y, sensitive_feature, epsilon=0.2))
[0.26063673 0.69140959 0.68940959 0.26663673]
"""
_check_epsilon(epsilon)
_check_shape(y, sensitive_feature)
modalities_test = self._get_modalities(sensitive_feature)
|
class FairWasserstein(BaseHelper):
"""
Class implementing Wasserstein distance-based fairness adjustment for binary classification tasks.
Parameters
----------
sigma : float, optional (default=0.0001)
Standard deviation of the random noise added during fairness adjustment.
Attributes
----------
sigma : float
Standard deviation of the random noise added during fairness adjustment.
modalities_calib : dict
Dictionary storing modality values obtained from calibration data.
weights : dict
Dictionary storing weights (probabilities) for each modality based on their occurrences in calibration data.
ecdf : dict
Dictionary storing ECDF (Empirical Cumulative Distribution Function) objects for each sensitive modality.
eqf : dict
Dictionary storing EQF (Empirical Quantile Function) objects for each sensitive modality.
Methods
-------
fit(y, sensitive_feature)
Fit the fairness adjustment model using calibration data.
transform(y, sensitive_feature, epsilon=0)
Transform test data to enforce fairness using Wasserstein distance.
"""
def __init__(self, sigma=0.0001):
super().__init__()
self.sigma = sigma
self.modalities_calib = None
def fit(self, y, sensitive_feature):
"""
Perform fit on the calibration data and save the ECDF, EQF, and weights of the sensitive variable.
Parameters
----------
y : array-like, shape (n_samples,)
The calibration labels.
sensitive_feature : array-like, shape (n_samples,)
The calibration samples representing one single sensitive attribute.
Returns
-------
None
Notes
-----
This method computes the ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights for the sensitive variable
based on the provided calibration data. These computed values are used
during the transformation process to ensure fairness in predictions.
Examples
--------
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> y = np.array([0.0, 1.0, 1.0, 0.0])
>>> sensitive_feature = np.array([1, 2, 0, 2])
>>> wasserstein.fit(y, sensitive_feature)
"""
_check_shape(y, sensitive_feature)
self.modalities_calib = self._get_modalities(sensitive_feature)
self._compute_weights(sensitive_feature)
self._estimate_ecdf_eqf(y, sensitive_feature, self.sigma)
def transform(self, y, sensitive_feature, epsilon=0):
"""
Transform the test data to enforce fairness using Wasserstein distance.
Parameters
----------
y : array-like, shape (n_samples,)
The target values of the test data.
sensitive_feature : array-like, shape (n_samples,)
The test samples representing a single sensitive attribute.
epsilon : float, optional (default=0)
The fairness parameter controlling the trade-off between fairness and accuracy.
It represents the fraction of the original predictions retained after fairness adjustment.
Epsilon should be a value between 0 and 1, where 0 means full fairness and 1 means no fairness constraint.
Returns
-------
y_fair : array-like, shape (n_samples,)
Fair predictions for the test data after enforcing fairness constraints.
Notes
-----
This method applies Wasserstein distance-based fairness adjustment to the test data
using the precomputed ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights obtained from the calibration data.
Random noise within the range of [-sigma, sigma] is added to the test data to ensure fairness.
The parameter epsilon controls the trade-off between fairness and accuracy,
with 0 enforcing full fairness and 1 retaining the original predictions.
Examples
--------
>>> y = np.array([0.05, 0.08, 0.9, 0.9, 0.01, 0.88])
>>> sensitive_feature = np.array([1, 3, 2, 3, 1, 2])
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> wasserstein.fit(y, sensitive_feature)
>>> y = np.array([0.01, 0.99, 0.98, 0.04])
>>> sensitive_feature = np.array([3, 1, 2, 3])
>>> print(wasserstein.transform(y, sensitive_feature, epsilon=0.2))
[0.26063673 0.69140959 0.68940959 0.26663673]
"""
_check_epsilon(epsilon)
_check_shape(y, sensitive_feature)
modalities_test = self._get_modalities(sensitive_feature) | _check_mod(self.modalities_calib, modalities_test) | 2 | 2023-12-06 14:43:41+00:00 | 4k |
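The FairWasserstein docstrings above describe mapping each group's scores through its ECDF and then through the weighted mixture of per-group quantile functions, with epsilon interpolating back toward the raw predictions. Below is a minimal NumPy sketch of that idea, not the equipy implementation: a rank-based ECDF and np.quantile stand in for the library's ECDF/EQF objects.

```python
import numpy as np

def fair_transform(y, s, y_calib, s_calib, epsilon=0.0):
    """Barycenter-style correction: epsilon=0 -> fully fair, epsilon=1 -> original predictions."""
    groups = np.unique(s_calib)
    weights = {g: np.mean(s_calib == g) for g in groups}   # P(S = g) estimated on calibration data
    y = np.asarray(y, dtype=float)
    y_fair = np.zeros_like(y)
    for g in groups:
        idx = (s == g)
        if not idx.any():
            continue
        calib_g = np.sort(y_calib[s_calib == g])
        # empirical CDF of group g, evaluated at the test scores belonging to group g
        u = np.clip(np.searchsorted(calib_g, y[idx], side="right") / len(calib_g), 0.0, 1.0)
        # weighted mixture of every group's quantile function at level u
        for g2 in groups:
            y_fair[idx] += weights[g2] * np.quantile(y_calib[s_calib == g2], u)
    return epsilon * y + (1.0 - epsilon) * y_fair

y_calib = np.array([0.05, 0.08, 0.90, 0.90, 0.01, 0.88])
s_calib = np.array([1, 3, 2, 3, 1, 2])
print(fair_transform(np.array([0.01, 0.99]), np.array([3, 1]), y_calib, s_calib, epsilon=0.2))
```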
Tlntin/booking_simulator | modelscope_agent/tools/code_interpreter_utils/language_map.py | [
{
"identifier": "AppleScript",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/applescript.py",
"snippet": "class AppleScript(SubprocessCodeInterpreter):\n file_extension = 'applescript'\n proper_name = 'AppleScript'\n\n def __init__(self):\n super().__init__()\n self.start_cmd = os.environ.get('SHELL', '/bin/zsh')\n\n def preprocess_code(self, code):\n \"\"\"\n Inserts an end_of_execution marker and adds active line indicators.\n \"\"\"\n # Add active line indicators to the code\n code = self.add_active_line_indicators(code)\n\n # Escape double quotes\n code = code.replace('\"', r\"\\\"\")\n\n # Wrap in double quotes\n code = '\"' + code + '\"'\n\n # Prepend start command for AppleScript\n code = 'osascript -e ' + code\n\n # Append end of execution indicator\n code += '; echo \"##end_of_execution##\"'\n\n return code\n\n def add_active_line_indicators(self, code):\n \"\"\"\n Adds log commands to indicate the active line of execution in the AppleScript.\n \"\"\"\n modified_lines = []\n lines = code.split('\\n')\n\n for idx, line in enumerate(lines):\n # Add log command to indicate the line number\n if line.strip(): # Only add if line is not empty\n modified_lines.append(f'log \"##active_line{idx + 1}##\"')\n modified_lines.append(line)\n\n return '\\n'.join(modified_lines)\n\n def detect_active_line(self, line):\n \"\"\"\n Detects active line indicator in the output.\n \"\"\"\n prefix = '##active_line'\n if prefix in line:\n try:\n return int(line.split(prefix)[1].split()[0])\n except Exception as e:\n print(e)\n pass\n return None\n\n def detect_end_of_execution(self, line):\n \"\"\"\n Detects end of execution marker in the output.\n \"\"\"\n return '##end_of_execution##' in line"
},
{
"identifier": "HTML",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/html.py",
"snippet": "class HTML(BaseCodeInterpreter):\n file_extension = 'html'\n proper_name = 'HTML'\n\n def __init__(self):\n super().__init__()\n\n def run(self, code):\n # Create a temporary HTML file with the content\n with tempfile.NamedTemporaryFile(delete=False, suffix='.html') as f:\n f.write(code.encode())\n\n # Open the HTML file with the default web browser\n webbrowser.open('file://' + os.path.realpath(f.name))\n\n yield {\n 'output':\n f\"Saved to {os.path.realpath(f.name)} and opened with the user's default web browser.\"\n }"
},
{
"identifier": "JavaScript",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/javascript.py",
"snippet": "class JavaScript(SubprocessCodeInterpreter):\n file_extension = 'js'\n proper_name = 'JavaScript'\n\n def __init__(self):\n super().__init__()\n self.start_cmd = 'node -i'\n\n def preprocess_code(self, code):\n return preprocess_javascript(code)\n\n def line_postprocessor(self, line):\n # Node's interactive REPL outputs a billion things\n # So we clean it up:\n if 'Welcome to Node.js' in line:\n return None\n if line.strip() in ['undefined', 'Type \".help\" for more information.']:\n return None\n # Remove trailing \">\"s\n line = re.sub(r'^\\s*(>\\s*)+', '', line)\n return line\n\n def detect_active_line(self, line):\n if '##active_line' in line:\n return int(line.split('##active_line')[1].split('##')[0])\n return None\n\n def detect_end_of_execution(self, line):\n return '##end_of_execution##' in line"
},
{
"identifier": "PowerShell",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/powershell.py",
"snippet": "class PowerShell(SubprocessCodeInterpreter):\n file_extension = 'ps1'\n proper_name = 'PowerShell'\n\n def __init__(self):\n super().__init__()\n\n # Determine the start command based on the platform (use \"powershell\" for Windows)\n if platform.system() == 'Windows':\n self.start_cmd = 'powershell.exe'\n # self.start_cmd = os.environ.get('SHELL', 'powershell.exe')\n else:\n # On non-Windows platforms, prefer pwsh (PowerShell Core) if available, or fall back to bash\n self.start_cmd = 'pwsh' if shutil.which('pwsh') else 'bash'\n\n def preprocess_code(self, code):\n return preprocess_powershell(code)\n\n def line_postprocessor(self, line):\n return line\n\n def detect_active_line(self, line):\n if '##active_line' in line:\n return int(line.split('##active_line')[1].split('##')[0])\n return None\n\n def detect_end_of_execution(self, line):\n return '##end_of_execution##' in line"
},
{
"identifier": "Python",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/python.py",
"snippet": "class Python(SubprocessCodeInterpreter):\n file_extension = 'py'\n proper_name = 'Python'\n\n def __init__(self):\n super().__init__()\n executable = sys.executable\n if os.name != 'nt': # not Windows\n executable = shlex.quote(executable)\n self.start_cmd = executable + ' -i -q -u'\n\n def preprocess_code(self, code):\n return preprocess_python(code)\n\n def line_postprocessor(self, line):\n if re.match(r'^(\\s*>>>\\s*|\\s*\\.\\.\\.\\s*)', line):\n return None\n return line\n\n def detect_active_line(self, line):\n if '##active_line' in line:\n return int(line.split('##active_line')[1].split('##')[0])\n return None\n\n def detect_end_of_execution(self, line):\n return '##end_of_execution##' in line"
},
{
"identifier": "R",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/r.py",
"snippet": "class R(SubprocessCodeInterpreter):\n file_extension = 'r'\n proper_name = 'R'\n\n def __init__(self):\n super().__init__()\n self.start_cmd = 'R -q --vanilla' # Start R in quiet and vanilla mode\n\n def preprocess_code(self, code):\n \"\"\"\n Add active line markers\n Wrap in a tryCatch for better error handling in R\n Add end of execution marker\n \"\"\"\n\n lines = code.split('\\n')\n processed_lines = []\n\n for i, line in enumerate(lines, 1):\n # Add active line print\n processed_lines.append(f'cat(\"##active_line{i}##\\\\n\");{line}')\n\n # Join lines to form the processed code\n processed_code = '\\n'.join(processed_lines)\n\n # Wrap in a tryCatch for error handling and add end of execution marker\n processed_code = f\"\"\"\ntryCatch({{\n{processed_code}\n}}, error=function(e){{\n cat(\"## execution_error ##\\\\n\", conditionMessage(e), \"\\\\n\");\n}})\ncat(\"## end_of_execution ##\\\\n\");\n\"\"\"\n # Count the number of lines of processed_code\n # (R echoes all code back for some reason, but we can skip it if we track this!)\n self.code_line_count = len(processed_code.split('\\n')) - 1\n\n return processed_code\n\n def line_postprocessor(self, line):\n # If the line count attribute is set and non-zero, decrement and skip the line\n if hasattr(self, 'code_line_count') and self.code_line_count > 0:\n self.code_line_count -= 1\n return None\n\n if re.match(r'^(\\s*>>>\\s*|\\s*\\.\\.\\.\\s*|\\s*>\\s*|\\s*\\+\\s*|\\s*)$', line):\n return None\n if 'R version' in line: # Startup message\n return None\n if line.strip().startswith('[1] \"') and line.endswith(\n '\"'): # For strings, trim quotation marks\n return line[5:-1].strip()\n if line.strip().startswith(\n '[1]'): # Normal R output prefix for non-string outputs\n return line[4:].strip()\n\n return line\n\n def detect_active_line(self, line):\n if '##active_line' in line:\n return int(line.split('##active_line')[1].split('##')[0])\n return None\n\n def detect_end_of_execution(self, line):\n return '##end_of_execution##' in line or '## execution_error ##' in line"
},
{
"identifier": "Shell",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/shell.py",
"snippet": "class Shell(SubprocessCodeInterpreter):\n file_extension = 'sh'\n proper_name = 'Shell'\n\n def __init__(self):\n super().__init__()\n\n # Determine the start command based on the platform\n if platform.system() == 'Windows':\n self.start_cmd = 'cmd.exe'\n else:\n self.start_cmd = os.environ.get('SHELL', 'bash')\n\n def preprocess_code(self, code):\n return preprocess_shell(code)\n\n def line_postprocessor(self, line):\n return line\n\n def detect_active_line(self, line):\n if '##active_line' in line:\n return int(line.split('##active_line')[1].split('##')[0])\n return None\n\n def detect_end_of_execution(self, line):\n return '##end_of_execution##' in line"
}
] | from .languages.applescript import AppleScript
from .languages.html import HTML
from .languages.javascript import JavaScript
from .languages.powershell import PowerShell
from .languages.python import Python
from .languages.r import R
from .languages.shell import Shell | 2,419 |
language_map = {
'python': Python,
'bash': Shell,
'shell': Shell,
'zsh': Shell,
|
language_map = {
'python': Python,
'bash': Shell,
'shell': Shell,
'zsh': Shell, | 'javascript': JavaScript, | 2 | 2023-12-12 04:24:00+00:00 | 4k |
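language_map above is a plain alias-to-class dispatch table: several shell aliases share one interpreter class, and a caller resolves the class by name before running code. A small self-contained sketch of that pattern follows; EchoInterpreter is a stand-in, not the modelscope_agent interpreter API.

```python
# Dispatch-table pattern: map language aliases to an interpreter class, then instantiate and run.
class EchoInterpreter:
    """Stand-in for a real code interpreter; it only reports what it would execute."""
    def run(self, code: str) -> str:
        return f"would execute: {code!r}"

language_map = {"python": EchoInterpreter, "bash": EchoInterpreter,
                "shell": EchoInterpreter, "zsh": EchoInterpreter}

def run_code(language: str, code: str) -> str:
    interpreter_cls = language_map.get(language.lower())
    if interpreter_cls is None:
        raise ValueError(f"unsupported language: {language}")
    return interpreter_cls().run(code)

print(run_code("python", "print(1 + 1)"))   # would execute: 'print(1 + 1)'
```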
chenchenygu/watermark-learnability | kgw_watermarking/watermark_reliability_release/normalizers.py | [
{
"identifier": "Categories",
"path": "kgw_watermarking/watermark_reliability_release/homoglyphs.py",
"snippet": "class Categories:\n \"\"\"\n Work with aliases from ISO 15924.\n https://en.wikipedia.org/wiki/ISO_15924#List_of_codes\n \"\"\"\n\n fpath = os.path.join(DATA_LOCATION, \"categories.json\")\n\n @classmethod\n def _get_ranges(cls, categories):\n \"\"\"\n :return: iter: (start code, end code)\n :rtype: list\n \"\"\"\n with open(cls.fpath, encoding=\"utf-8\") as f:\n data = json.load(f)\n\n for category in categories:\n if category not in data[\"aliases\"]:\n raise ValueError(\"Invalid category: {}\".format(category))\n\n for point in data[\"points\"]:\n if point[2] in categories:\n yield point[:2]\n\n @classmethod\n def get_alphabet(cls, categories):\n \"\"\"\n :return: set of chars in alphabet by categories list\n :rtype: set\n \"\"\"\n alphabet = set()\n for start, end in cls._get_ranges(categories):\n chars = (chr(code) for code in range(start, end + 1))\n alphabet.update(chars)\n return alphabet\n\n @classmethod\n def detect(cls, char):\n \"\"\"\n :return: category\n :rtype: str\n \"\"\"\n with open(cls.fpath, encoding=\"utf-8\") as f:\n data = json.load(f)\n\n # try detect category by unicodedata\n try:\n category = unicodedata.name(char).split()[0]\n except (TypeError, ValueError):\n # In Python2 unicodedata.name raise error for non-unicode chars\n # Python3 raise ValueError for non-unicode characters\n pass\n else:\n if category in data[\"aliases\"]:\n return category\n\n # try detect category by ranges from JSON file.\n code = ord(char)\n for point in data[\"points\"]:\n if point[0] <= code <= point[1]:\n return point[2]\n\n @classmethod\n def get_all(cls):\n with open(cls.fpath, encoding=\"utf-8\") as f:\n data = json.load(f)\n return set(data[\"aliases\"])"
},
{
"identifier": "Languages",
"path": "kgw_watermarking/watermark_reliability_release/homoglyphs.py",
"snippet": "class Languages:\n fpath = os.path.join(DATA_LOCATION, \"languages.json\")\n\n @classmethod\n def get_alphabet(cls, languages):\n \"\"\"\n :return: set of chars in alphabet by languages list\n :rtype: set\n \"\"\"\n with open(cls.fpath, encoding=\"utf-8\") as f:\n data = json.load(f)\n alphabet = set()\n for lang in languages:\n if lang not in data:\n raise ValueError(\"Invalid language code: {}\".format(lang))\n alphabet.update(data[lang])\n return alphabet\n\n @classmethod\n def detect(cls, char):\n \"\"\"\n :return: set of languages which alphabet contains passed char.\n :rtype: set\n \"\"\"\n with open(cls.fpath, encoding=\"utf-8\") as f:\n data = json.load(f)\n languages = set()\n for lang, alphabet in data.items():\n if char in alphabet:\n languages.add(lang)\n return languages\n\n @classmethod\n def get_all(cls):\n with open(cls.fpath, encoding=\"utf-8\") as f:\n data = json.load(f)\n return set(data.keys())"
},
{
"identifier": "Homoglyphs",
"path": "kgw_watermarking/watermark_reliability_release/homoglyphs.py",
"snippet": "class Homoglyphs:\n def __init__(\n self,\n categories=None,\n languages=None,\n alphabet=None,\n strategy=STRATEGY_IGNORE,\n ascii_strategy=STRATEGY_IGNORE,\n ascii_range=ASCII_RANGE,\n ):\n # strategies\n if strategy not in (STRATEGY_LOAD, STRATEGY_IGNORE, STRATEGY_REMOVE):\n raise ValueError(\"Invalid strategy\")\n self.strategy = strategy\n self.ascii_strategy = ascii_strategy\n self.ascii_range = ascii_range\n\n # Homoglyphs must be initialized by any alphabet for correct work\n if not categories and not languages and not alphabet:\n categories = (\"LATIN\", \"COMMON\")\n\n # cats and langs\n self.categories = set(categories or [])\n self.languages = set(languages or [])\n\n # alphabet\n self.alphabet = set(alphabet or [])\n if self.categories:\n alphabet = Categories.get_alphabet(self.categories)\n self.alphabet.update(alphabet)\n if self.languages:\n alphabet = Languages.get_alphabet(self.languages)\n self.alphabet.update(alphabet)\n self.table = self.get_table(self.alphabet)\n\n @staticmethod\n def get_table(alphabet):\n table = defaultdict(set)\n with open(os.path.join(DATA_LOCATION, \"confusables_sept2022.json\")) as f:\n data = json.load(f)\n for char in alphabet:\n if char in data:\n for homoglyph in data[char]:\n if homoglyph in alphabet:\n table[char].add(homoglyph)\n return table\n\n @staticmethod\n def get_restricted_table(source_alphabet, target_alphabet):\n table = defaultdict(set)\n with open(os.path.join(DATA_LOCATION, \"confusables_sept2022.json\")) as f:\n data = json.load(f)\n for char in source_alphabet:\n if char in data:\n for homoglyph in data[char]:\n if homoglyph in target_alphabet:\n table[char].add(homoglyph)\n return table\n\n @staticmethod\n def uniq_and_sort(data):\n result = list(set(data))\n result.sort(key=lambda x: (-len(x), x))\n return result\n\n def _update_alphabet(self, char):\n # try detect languages\n langs = Languages.detect(char)\n if langs:\n self.languages.update(langs)\n alphabet = Languages.get_alphabet(langs)\n self.alphabet.update(alphabet)\n else:\n # try detect categories\n category = Categories.detect(char)\n if category is None:\n return False\n self.categories.add(category)\n alphabet = Categories.get_alphabet([category])\n self.alphabet.update(alphabet)\n # update table for new alphabet\n self.table = self.get_table(self.alphabet)\n return True\n\n def _get_char_variants(self, char):\n if char not in self.alphabet:\n if self.strategy == STRATEGY_LOAD:\n if not self._update_alphabet(char):\n return []\n elif self.strategy == STRATEGY_IGNORE:\n return [char]\n elif self.strategy == STRATEGY_REMOVE:\n return []\n\n # find alternative chars for current char\n alt_chars = self.table.get(char, set())\n if alt_chars:\n # find alternative chars for alternative chars for current char\n alt_chars2 = [self.table.get(alt_char, set()) for alt_char in alt_chars]\n # combine all alternatives\n alt_chars.update(*alt_chars2)\n # add current char to alternatives\n alt_chars.add(char)\n\n # uniq, sort and return\n return self.uniq_and_sort(alt_chars)\n\n def _get_combinations(self, text, ascii=False):\n variations = []\n for char in text:\n alt_chars = self._get_char_variants(char)\n\n if ascii:\n alt_chars = [char for char in alt_chars if ord(char) in self.ascii_range]\n if not alt_chars and self.ascii_strategy == STRATEGY_IGNORE:\n return\n\n if alt_chars:\n variations.append(alt_chars)\n if variations:\n for variant in product(*variations):\n yield \"\".join(variant)\n\n def get_combinations(self, text):\n return 
list(self._get_combinations(text))\n\n def _to_ascii(self, text):\n for variant in self._get_combinations(text, ascii=True):\n if max(map(ord, variant)) in self.ascii_range:\n yield variant\n\n def to_ascii(self, text):\n return self.uniq_and_sort(self._to_ascii(text))"
}
] | from collections import defaultdict
from functools import cache
from .homoglyphs import Categories, Languages, Homoglyphs
from nltk import pos_tag, word_tokenize # noqa
from nltk import pos_tag, word_tokenize
import re
import unicodedata
import spacy
import nltk
import nltk | 2,390 | """ Text-based normalizers, used to mitigate simple attacks against watermarking.
This implementation is unlikely to be a complete list of all possible exploits within the unicode standard,
it represents our best effort at the time of writing.
These normalizers can be used as stand-alone normalizers. They could be made to conform to HF tokenizers standard, but that would
require messing with the limited rust interface of tokenizers.NormalizedString
"""
def normalization_strategy_lookup(strategy_name: str) -> object:
if strategy_name == "unicode":
return UnicodeSanitizer()
elif strategy_name == "homoglyphs":
return HomoglyphCanonizer()
elif strategy_name == "truecase":
return TrueCaser()
class HomoglyphCanonizer:
"""Attempts to detect homoglyph attacks and find a consistent canon.
This function does so on a per-ISO-category level. Language-level would also be possible (see commented code).
"""
def __init__(self):
self.homoglyphs = None
def __call__(self, homoglyphed_str: str) -> str:
# find canon:
target_category, all_categories = self._categorize_text(homoglyphed_str)
homoglyph_table = self._select_canon_category_and_load(target_category, all_categories)
return self._sanitize_text(target_category, homoglyph_table, homoglyphed_str)
def _categorize_text(self, text: str) -> dict:
iso_categories = defaultdict(int)
# self.iso_languages = defaultdict(int)
for char in text:
| """ Text-based normalizers, used to mitigate simple attacks against watermarking.
This implementation is unlikely to be a complete list of all possible exploits within the unicode standard,
it represents our best effort at the time of writing.
These normalizers can be used as stand-alone normalizers. They could be made to conform to HF tokenizers standard, but that would
require messing with the limited rust interface of tokenizers.NormalizedString
"""
def normalization_strategy_lookup(strategy_name: str) -> object:
if strategy_name == "unicode":
return UnicodeSanitizer()
elif strategy_name == "homoglyphs":
return HomoglyphCanonizer()
elif strategy_name == "truecase":
return TrueCaser()
class HomoglyphCanonizer:
"""Attempts to detect homoglyph attacks and find a consistent canon.
This function does so on a per-ISO-category level. Language-level would also be possible (see commented code).
"""
def __init__(self):
self.homoglyphs = None
def __call__(self, homoglyphed_str: str) -> str:
# find canon:
target_category, all_categories = self._categorize_text(homoglyphed_str)
homoglyph_table = self._select_canon_category_and_load(target_category, all_categories)
return self._sanitize_text(target_category, homoglyph_table, homoglyphed_str)
def _categorize_text(self, text: str) -> dict:
iso_categories = defaultdict(int)
# self.iso_languages = defaultdict(int)
for char in text: | iso_categories[Categories.detect(char)] += 1 | 0 | 2023-12-07 16:45:33+00:00 | 4k |
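_categorize_text above counts the Unicode category of every character so the canonizer can pick a target script before replacing look-alike glyphs. The toy sketch below illustrates only the replacement step, with a tiny hand-picked confusables table; it is an illustration, not the confusables data file the repo loads.

```python
# Toy homoglyph canonicalization: map a few Cyrillic look-alikes back to Latin.
CONFUSABLES = {"а": "a", "е": "e", "о": "o", "р": "p", "с": "c"}  # keys are Cyrillic characters

def canonize(text: str) -> str:
    return "".join(CONFUSABLES.get(ch, ch) for ch in text)

print(canonize("wаtеrmаrk"))  # the 'а'/'е' here are Cyrillic; prints the all-Latin "watermark"
```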
skyoux/SemAIM | models/models_semaim.py | [
{
"identifier": "get_2d_sincos_pos_embed",
"path": "util/pos_embed.py",
"snippet": "def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n \"\"\"\n grid_h = np.arange(grid_size, dtype=np.float32)\n grid_w = np.arange(grid_size, dtype=np.float32)\n grid = np.meshgrid(grid_w, grid_h) # here w goes first\n grid = np.stack(grid, axis=0)\n\n grid = grid.reshape([2, 1, grid_size, grid_size])\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n if cls_token:\n pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)\n return pos_embed"
},
{
"identifier": "GaussianConv2d",
"path": "util/blocks.py",
"snippet": "class GaussianConv2d(nn.Module):\n def __init__(self, channels=3, kernel_size=9, sigma=1):\n super().__init__()\n position = torch.stack(torch.meshgrid([torch.arange(kernel_size), torch.arange(kernel_size)]), dim=-1)\n mean = torch.tensor([(kernel_size - 1) // 2, (kernel_size - 1) // 2])\n std = torch.tensor([sigma, sigma])\n kernel = 1 / (2 * math.pi * torch.prod(std, dim=-1)) * math.e ** (-((position - mean) ** 2 / std ** 2).sum(-1)/2)\n kernel = kernel / kernel.sum()\n\n kernel = kernel.view(1, 1, kernel_size, kernel_size).repeat(channels, 1, 1, 1)\n\n self.register_buffer('weight', kernel)\n self.groups = channels\n self.padding = kernel_size // 2\n\n def forward(self, input):\n return F.conv2d(input, weight=self.weight, groups=self.groups, padding=self.padding)"
},
{
"identifier": "Block_SelfMask",
"path": "util/blocks.py",
"snippet": "class Block_SelfMask(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention_SelfMask(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x, mask=None, return_attention=False):\n if return_attention:\n return self.attn(self.norm1(x), mask, return_attention)\n x = x + self.drop_path(self.attn(self.norm1(x), mask))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x"
},
{
"identifier": "Block_SelfCrossMask",
"path": "util/blocks.py",
"snippet": "class Block_SelfCrossMask(nn.Module):\n \"\"\"\n The universal attention block can be used as both self-attention and cross-attention.\n q,k,v can define separately.\n If we only assign a value to q, it's a self-attention block;\n if we assign values for q and k, it's a cross-attention block.\n \"\"\"\n\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention_SelfCrossMask(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, q, k=None, v=None, mask=None, return_attention=False):\n if k is None:\n k = q\n if v is None:\n v = k\n if return_attention:\n return self.attn(self.norm1(q), self.norm1(k), self.norm1(v), mask, return_attention)\n x = q + self.drop_path(self.attn(self.norm1(q), self.norm1(k), self.norm1(v), mask))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x"
}
] | import math
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import PatchEmbed, Mlp
from util.pos_embed import get_2d_sincos_pos_embed
from util.blocks import GaussianConv2d
from util.blocks import Block_SelfMask, Block_SelfCrossMask | 1,795 | # References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
class AimViT(nn.Module):
"""
Pretrain vision transformer backbone with AIM
parallel encoder-decoder architecture
Modified by sky: use the blocks in ViT (+ mask) for encoders, which is more convenient for fine-tuning and linear evaluation
modify the permutation from stochastic mask to center-out mask
"""
def __init__(self,
# vision transformer backbone
img_size=224, patch_size=16, in_chans=3,
embed_dim=1024, depth=24, num_heads=16, drop_path_rate=0., out_dim=768,
mlp_ratio=4., norm_layer=partial(nn.LayerNorm, eps=1e-6),
# aim
permutation_type='center2out', attention_type='cls',
# decoder
query_depth=12, share_weight=False,
prediction_head_type='MLP',
# loss function
gaussian_kernel_size=None, gaussian_sigma=None,
loss_type='L2', predict_feature='none', norm_pix_loss=True):
super().__init__()
# patch embedding
self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
num_patches = self.patch_embed.num_patches
self.patch_size = patch_size
# cls token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# position embedding
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# encoder
self.blocks = nn.ModuleList([
Block_SelfMask(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, drop_path=dpr[i])
for i in range(depth)])
# decoder
if share_weight:
self.query_blocks = self.blocks
else:
self.query_blocks = nn.ModuleList([
| # References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
class AimViT(nn.Module):
"""
Pretrain vision transformer backbone with AIM
parallel encoder-decoder architecture
Modified by sky: use the blocks in ViT (+ mask) for encoders, which is more convenient for fine-tuning and linear evaluation
modify the permutation from stochastic mask to center-out mask
"""
def __init__(self,
# vision transformer backbone
img_size=224, patch_size=16, in_chans=3,
embed_dim=1024, depth=24, num_heads=16, drop_path_rate=0., out_dim=768,
mlp_ratio=4., norm_layer=partial(nn.LayerNorm, eps=1e-6),
# aim
permutation_type='center2out', attention_type='cls',
# decoder
query_depth=12, share_weight=False,
prediction_head_type='MLP',
# loss function
gaussian_kernel_size=None, gaussian_sigma=None,
loss_type='L2', predict_feature='none', norm_pix_loss=True):
super().__init__()
# patch embedding
self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
num_patches = self.patch_embed.num_patches
self.patch_size = patch_size
# cls token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# position embedding
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# encoder
self.blocks = nn.ModuleList([
Block_SelfMask(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, drop_path=dpr[i])
for i in range(depth)])
# decoder
if share_weight:
self.query_blocks = self.blocks
else:
self.query_blocks = nn.ModuleList([ | Block_SelfCrossMask(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, drop_path=dpr[i]) | 3 | 2023-12-10 15:17:11+00:00 | 4k |
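The constructor above mentions permutation_type='center2out', i.e. an autoregressive order that starts at the centre of the patch grid and moves outward. A short sketch of how such an order can be built, assuming a square grid; this is an illustration, not the SemAIM code.

```python
import numpy as np

def center2out_permutation(grid_size: int) -> np.ndarray:
    """Return patch indices sorted by squared distance from the grid centre (centre first)."""
    ys, xs = np.meshgrid(np.arange(grid_size), np.arange(grid_size), indexing="ij")
    centre = (grid_size - 1) / 2.0
    dist = (ys - centre) ** 2 + (xs - centre) ** 2
    return np.argsort(dist.reshape(-1), kind="stable")

print(center2out_permutation(4))  # for a 4x4 grid, the centre block 5, 6, 9, 10 comes first
```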
boweniac/autogan | autogan/utils/compressed_messages_utils.py | [
{
"identifier": "generate_chat_completion",
"path": "autogan/oai/generate_utils.py",
"snippet": "def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Call the LLM interface\n\n Currently, only the chatgpt model of openai (including azure) is adapted.\n\n :param llm_config: LLM configuration.\n :param messages:\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n :param response_func: Used to return results to the interface or terminal.\n :param stream_mode:\n \"\"\"\n\n # When a certain configuration in the configuration list fails to request,\n # continue to try the next configuration until all configurations in the list are attempted.\n loop = llm_config.len_of_api_key_list\n for i in range(loop):\n time.sleep(llm_config.request_interval_time)\n api_key = llm_config.next_api_key\n try:\n completion_content = \"\"\n completion_tokens = 0\n index = 1\n for message in chat_completions(messages, api_key, llm_config.request_timeout,\n llm_config.max_retries, stream_mode):\n content = \"\"\n if stream_mode:\n if (message and \"choices\" in message and \"delta\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"delta\"]\n and message[\"choices\"][0][\"delta\"][\"content\"]):\n content = message[\"choices\"][0][\"delta\"][\"content\"]\n completion_content += content\n else:\n if (message and \"choices\" in message and \"message\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"message\"]\n and message[\"choices\"][0][\"message\"][\"content\"]):\n content = message[\"choices\"][0][\"message\"][\"content\"]\n completion_content = content\n if message and \"usage\" in message and \"completion_tokens\" in message[\"usage\"]:\n completion_tokens = message[\"usage\"][\"completion_tokens\"]\n response_func(agent_name, gen, api_key[\"model\"], stream_mode, index, content, completion_tokens, message)\n if content:\n index += 1\n\n if completion_content:\n if completion_tokens == 0:\n completion_tokens = count_text_tokens(completion_content, api_key['model'])\n return completion_content, completion_tokens\n else:\n raise ValueError(\"The return value is empty.\")\n except Exception as e:\n if i == loop - 1:\n print(f\"generate_chat_completion Exception: {e}\")\n return None, None"
},
{
"identifier": "LLMConfig",
"path": "autogan/oai/config_utils.py",
"snippet": "class LLMConfig:\n \"\"\"LLM config object\n \"\"\"\n\n def __init__(\n self,\n api_key_list: ConfigList,\n max_messages_tokens: str,\n request_interval_time: int,\n request_timeout: int,\n max_retries: int\n ):\n self._api_key_list = api_key_list\n self._max_messages_tokens = max_messages_tokens\n self._request_interval_time = request_interval_time\n self._request_timeout = request_timeout\n self._max_retries = max_retries\n\n def api_key(self, index):\n \"\"\"Get the one configuration in the api_key_list.\n \"\"\"\n return self._api_key_list.get_config(index)\n\n @property\n def next_api_key(self):\n \"\"\"Get the next configuration in the api_key_list.\n \"\"\"\n return self._api_key_list.get_next_config\n\n @property\n def len_of_api_key_list(self) -> int:\n \"\"\"Get the first configuration in the api_key_list list.\n \"\"\"\n return self._api_key_list.len\n\n @property\n def model(self):\n \"\"\"Get the model of the first configuration in the api_key_list list.\n \"\"\"\n return self._api_key_list.get_first_config[\"model\"]\n\n @property\n def max_messages_tokens(self):\n \"\"\"Limit the maximum tokens of the context in each dialogue.\n \"\"\"\n return self._max_messages_tokens\n\n @property\n def request_interval_time(self):\n return self._request_interval_time\n\n @property\n def request_timeout(self):\n return self._request_timeout\n\n @property\n def max_retries(self):\n return self._max_retries"
},
{
"identifier": "ResponseFuncType",
"path": "autogan/utils/response.py",
"snippet": " def colored(x, *args, **kwargs):\ndef default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,\n content: Optional[str], tokens: Optional[int], response: any):\ndef obj_to_dict(obj):"
}
] | import json
from typing import Dict, Optional, List
from autogan.oai.generate_utils import generate_chat_completion
from autogan.oai.config_utils import LLMConfig
from autogan.utils.response import ResponseFuncType | 3,398 |
Note: The compression process does not treat messages from the 'system' role specially, and they should be excluded from 'messages'.
注意:压缩过程并未对 system 角色的消息进行特殊处理,应将其排除在 messages 之外。
:param messages: The conversation content to be compressed, excluding 'system message' and 'focus message'. It should include 'role', 'content', 'tokens' fields.
待压缩的会话内容,应排除掉 system message 和 focus message。需包含 'role','content','tokens' 字段。
:param focus: The focus direction when compressing distant conversation records
压缩远期会话记录时的专注方向
:param summary_model_config: The LLM model configuration used to compress distant conversation records
用于压缩远期会话记录的 LLM 模型配置
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param safe_size: 'max_messages_tokens' of 'agent main model' minus the tokens of 'system message' and 'focus message'. When 'safe_size' is less than 0, it will be forcibly defined as 1024
agent main model 的 max_messages_tokens 减去 system message 和 focus message 的 tokens,当 safe_size 小于 0 时,将被强制定义为 1024
:return:
--conversation_messages: The compressed conversation records, the difference from 'request_messages' is that the 'tokens' field of each message is retained
压缩后的会话记录,与 request_messages 的区别是保留了每条消息的 tokens 字段
--request_messages: The message content requested to 'llm', removed the 'tokens' field of each message
用于向 llm 请求的消息内容,去掉了每条消息的 tokens 字段
--total_tokens: The total tokens after compression
压缩后的整体tokens
"""
conversation_messages = []
request_messages = []
total_tokens = 0
if len(messages) == 0:
return None, None, None
if safe_size < 0:
safe_size = 1024
# Reverse traverse the message to extract recent original conversation content.
i = 0
for message in reversed(messages):
tokens = message["tokens"]
if total_tokens + tokens > int(safe_size * 0.5) and i != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
conversation_messages.insert(0, message)
request_messages.insert(0, message_copy)
total_tokens += tokens
i -= 1
# Compress the remaining messages as distant conversation records.
if len(messages) > (i * -1):
compressed_size = safe_size - total_tokens
if compressed_size <= 0:
compressed_size = 1024
# 压缩剩余 messages
content, tokens = generate_messages_summary(messages[:i], focus, summary_model_config, compressed_size, agent_name, response_func, stream_mode)
if content:
conversation_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}',
'tokens': tokens}
)
request_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}'}
)
total_tokens += tokens
if conversation_messages and request_messages:
return conversation_messages, request_messages, total_tokens
else:
return None, None, None
def generate_messages_summary(messages: List[Dict], focus: str, summary_model_config: LLMConfig, summary_size: int, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]:
"""Generate message summary
生成消息摘要
First, traverse the content of messages in reverse order, extract the long-term conversation records to be compressed, until the cumulative tokens of the long-term conversation records to be compressed exceed the value of max_messages_tokens in summary_model_config
先反向遍历 messages 中的内容,提取出待压缩的远期会话记录,直至待压缩远期会话记录的累计 tokens 超过 summary_model_config 中 max_messages_tokens 的值
If the tokens of the first record of the long-term conversation to be compressed exceed max_messages_tokens, then directly extract the first conversation record
如待压缩远期会话的第一条记录,其 tokens 就超过了 max_messages_tokens, 则直接提取第一条会话记录
Then compress the extracted long-term conversation records. The size after compression is expected to be kept within the range of summary_size
之后对提取出的远期会话记录进行压缩,压缩后的大小被期望保持在 summary_size 范围之内
:param messages: Messages to be compressed
待压缩的消息
:param focus: The focus direction when generating a summary
生成摘要时的专注方向
:param summary_model_config: The LLM model configuration for compressing long-term conversation records
用于压缩远期会话记录的 LLM 模型配置
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--content: Compressed content
压缩后的内容
--tokens: tokens of compressed content
压缩内容的tokens
"""
system_prompt = "Please make a concise summary based on the following historical information. Make sure your summary does not exceed the max_tokens limit. And when summarizing, please focus on the latest message sent by the user."
# system_prompt = "请根据以下的历史信息,进行简洁的总结。请确保您的总结不超过 max_tokens 的限制。并且在总结时,请将你的关注点集中在用户最新发送的消息上。"
summary_messages = []
total_tokens = 0
# 反向遍历 message 提取内容
for index, message in enumerate(reversed(messages)):
tokens = message["tokens"]
if total_tokens + tokens > summary_model_config.max_messages_tokens and index != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
summary_messages.insert(0, message_copy)
total_tokens += tokens
# 设置用户提示词
user_prompt = f"""max_tokens: {summary_size}\n\nHistorical information: {json.dumps(summary_messages)}\n\nUser's latest message: {focus}"""
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': user_prompt}]
|
def compressed_messages(messages: List[Dict], focus: str, summary_model_config: LLMConfig, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None,
safe_size: Optional[int] = 4096) -> tuple[Optional[list], Optional[list], Optional[int]]:
"""Compress Conversation Context
压缩会话上下文
The content to be compressed is divided into: recent original conversation content, and distant content that needs to be compressed.
待压缩的会话内容会被分为:近期的原始会话内容、远期需要压缩的会话内容。
When compressing distant conversation records, attention is focused on the 'focus'
在压缩远期会话记录时,会将注意力集中于 focus
**Recent Original Conversation Content:**
近期原始会话内容:
First, traverse the 'messages' in reverse order, extract the recent conversation records, until the cumulative tokens of the conversation records exceed 50% of the 'safe_size'
先反向遍历 messages,提取近期的会话记录,直至会话记录的累计 tokens 超过 safe_size 的 50%
If the tokens of the first recent conversation record exceed 50% of the 'safe_size', then directly extract the first recent conversation record
如近期第一条会话记录的 tokens 就超过了 safe_size 的 50% 则直接提取近期第一条会话记录
**Distant Compressed Conversation Content:**
远期压缩会话内容:
The remaining conversation records will be compressed as distant conversation records. The size after compression is expected to be within the range of ('safe_size' - cumulative original conversation tokens)
剩余的会话记录将作为远期会话记录进行压缩,压缩后的大小被期望保持在 (safe_size - 累计原始会话 tokens) 范围之内
If the value of 'safe_size' - cumulative original conversation tokens is less than 0, then the size after compression is expected to be 1024 tokens
如 safe_size - 累计原始会话 tokens 的值小于 0 则压缩后的大小被期望保持在 1024 tokens
Note: The compression process does not treat messages from the 'system' role specially, and they should be excluded from 'messages'.
注意:压缩过程并未对 system 角色的消息进行特殊处理,应将其排除在 messages 之外。
:param messages: The conversation content to be compressed, excluding 'system message' and 'focus message'. It should include 'role', 'content', 'tokens' fields.
待压缩的会话内容,应排除掉 system message 和 focus message。需包含 'role','content','tokens' 字段。
:param focus: The focus direction when compressing distant conversation records
压缩远期会话记录时的专注方向
:param summary_model_config: The LLM model configuration used to compress distant conversation records
用于压缩远期会话记录的 LLM 模型配置
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param safe_size: 'max_messages_tokens' of 'agent main model' minus the tokens of 'system message' and 'focus message'. When 'safe_size' is less than 0, it will be forcibly defined as 1024
agent main model 的 max_messages_tokens 减去 system message 和 focus message 的 tokens,当 safe_size 小于 0 时,将被强制定义为 1024
:return:
--conversation_messages: The compressed conversation records, the difference from 'request_messages' is that the 'tokens' field of each message is retained
压缩后的会话记录,与 request_messages 的区别是保留了每条消息的 tokens 字段
--request_messages: The message content requested to 'llm', removed the 'tokens' field of each message
用于向 llm 请求的消息内容,去掉了每条消息的 tokens 字段
--total_tokens: The total tokens after compression
压缩后的整体tokens
"""
conversation_messages = []
request_messages = []
total_tokens = 0
if len(messages) == 0:
return None, None, None
if safe_size < 0:
safe_size = 1024
# Reverse traverse the message to extract recent original conversation content.
i = 0
for message in reversed(messages):
tokens = message["tokens"]
if total_tokens + tokens > int(safe_size * 0.5) and i != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
conversation_messages.insert(0, message)
request_messages.insert(0, message_copy)
total_tokens += tokens
i -= 1
# Compress the remaining messages as distant conversation records.
if len(messages) > (i * -1):
compressed_size = safe_size - total_tokens
if compressed_size <= 0:
compressed_size = 1024
# 压缩剩余 messages
content, tokens = generate_messages_summary(messages[:i], focus, summary_model_config, compressed_size, agent_name, response_func, stream_mode)
if content:
conversation_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}',
'tokens': tokens}
)
request_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}'}
)
total_tokens += tokens
if conversation_messages and request_messages:
return conversation_messages, request_messages, total_tokens
else:
return None, None, None
def generate_messages_summary(messages: List[Dict], focus: str, summary_model_config: LLMConfig, summary_size: int, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]:
"""Generate message summary
First, traverse the content of messages in reverse order, extracting the distant conversation records to be compressed, until the cumulative tokens of the records to be compressed exceed the value of max_messages_tokens in summary_model_config.
If the tokens of the first record of the distant conversation to be compressed already exceed max_messages_tokens, then directly extract that first conversation record.
Then compress the extracted distant conversation records. The size after compression is expected to be kept within the range of summary_size.
:param messages: Messages to be compressed.
:param focus: The focus direction when generating a summary.
:param summary_model_config: The LLM model configuration for compressing distant conversation records.
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
:param stream_mode:
:return:
--content: Compressed content.
--tokens: Tokens of the compressed content.
"""
system_prompt = "Please make a concise summary based on the following historical information. Make sure your summary does not exceed the max_tokens limit. And when summarizing, please focus on the latest message sent by the user."
# system_prompt = "请根据以下的历史信息,进行简洁的总结。请确保您的总结不超过 max_tokens 的限制。并且在总结时,请将你的关注点集中在用户最新发送的消息上。"
summary_messages = []
total_tokens = 0
# Traverse the messages in reverse to extract their content
for index, message in enumerate(reversed(messages)):
tokens = message["tokens"]
if total_tokens + tokens > summary_model_config.max_messages_tokens and index != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
summary_messages.insert(0, message_copy)
total_tokens += tokens
# Set the user prompt
user_prompt = f"""max_tokens: {summary_size}\n\nHistorical information: {json.dumps(summary_messages)}\n\nUser's latest message: {focus}"""
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': user_prompt}] | return generate_chat_completion(summary_model_config, chat_messages, agent_name, "text_summary", response_func, | 0 | 2023-12-06 03:24:34+00:00 | 4k |
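# --- Illustrative sketch (editor's addition, not part of the dataset record above) ---
# generate_messages_summary builds a two-message chat request: the fixed system
# prompt plus a user prompt embedding the token budget, the JSON-dumped history
# and the focus text. The toy values below only show the shape of that user
# prompt; none of them come from the original project.
import json

_summary_size = 512
_summary_messages = [{"role": "user", "content": "Plan a trip to Kyoto"},
                     {"role": "assistant", "content": "Sure - how many days do you have?"}]
_focus = "Keep the itinerary under three days"
_user_prompt = (f"max_tokens: {_summary_size}\n\n"
                f"Historical information: {json.dumps(_summary_messages)}\n\n"
                f"User's latest message: {_focus}")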
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation | models/archs/IDR_restormer_arch.py | [
{
"identifier": "LayerNorm",
"path": "models/utils/arch_util.py",
"snippet": "class LayerNorm(nn.Module):\r\n \"\"\"\r\n x: B,C,H,W\r\n return: B,C,H,W\r\n process: LayerNorm(C)\r\n Adopted: Restormer\r\n \"\"\"\r\n def __init__(self, dim, LayerNorm_type):\r\n super(LayerNorm, self).__init__()\r\n if LayerNorm_type == 'BiasFree':\r\n self.body = BiasFree_LayerNorm(dim)\r\n else:\r\n self.body = WithBias_LayerNorm(dim)\r\n\r\n def forward(self, x):\r\n if len(x.shape)==4:\r\n h, w = x.shape[-2:]\r\n return to_4d(self.body(to_3d(x)), h, w)\r\n else:\r\n return self.body(x)\r"
},
{
"identifier": "Downsample",
"path": "models/utils/transformerBCHW_util.py",
"snippet": "class Downsample(nn.Module):\n \"\"\"\n x: B,C,H,W\n return: B,2C,H/2,W/2\n process: conv + pixel-unshuffle (降C,分辨率补C)\n Adopted: Restormer\n \"\"\"\n def __init__(self, n_feat):\n super(Downsample, self).__init__()\n\n self.body = nn.Sequential(nn.Conv2d(n_feat, n_feat // 2, kernel_size=3, stride=1, padding=1, bias=False),\n nn.PixelUnshuffle(2)) # (1,4,16,16) -> (1,16,8,8)\n\n def forward(self, x):\n return self.body(x)"
},
{
"identifier": "Upsample",
"path": "models/utils/transformerBCHW_util.py",
"snippet": "class Upsample(nn.Module):\n \"\"\"\n x: B,C,H,W\n return: B,C/2,2H,2W\n process: conv + pixel-shuffle (升C, C补分辨率)\n Adopted: Restormer\n \"\"\"\n def __init__(self, n_feat):\n super(Upsample, self).__init__()\n\n self.body = nn.Sequential(nn.Conv2d(n_feat, n_feat * 2, kernel_size=3, stride=1, padding=1, bias=False),\n nn.PixelShuffle(2)) # (1,4,16,16) -> (1,1,32,32)\n\n def forward(self, x):\n return self.body(x)"
},
{
"identifier": "MDTA_TransformerBlock",
"path": "models/utils/transformerBCHW_util.py",
"snippet": "class MDTA_TransformerBlock(nn.Module):\n \"\"\"\n x: B,C,H,W\n return: B,C,H,W\n process: MDTA + GDFN\n params: dim, num_heads, ffn_expansion_factor, bias:True/false LayerNorm_type: BiasFree/WithBias\n Adopted:: Restormer\n \"\"\"\n def __init__(self, dim, num_heads, ffn_expansion_factor, bias, LayerNorm_type):\n super(MDTA_TransformerBlock, self).__init__()\n\n self.norm1 = LayerNorm(dim, LayerNorm_type)\n self.attn = MDTA_Attention(dim, num_heads, bias)\n self.norm2 = LayerNorm(dim, LayerNorm_type)\n self.ffn = FeedForward(dim, ffn_expansion_factor, bias)\n\n def forward(self, x):\n x = x + self.attn(self.norm1(x))\n x = x + self.ffn(self.norm2(x))\n\n return x"
},
{
"identifier": "OverlapPatchEmbed_Keep",
"path": "models/utils/transformerBCHW_util.py",
"snippet": "class OverlapPatchEmbed_Keep(nn.Module):\n \"\"\"\n x: B,C1,H,W\n return: B,C2,H,W\n process: 单conv层\n Adopted: Restormer\n \"\"\"\n def __init__(self, in_c=3, embed_dim=48, bias=False):\n super(OverlapPatchEmbed_Keep, self).__init__()\n\n self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=3, stride=1, padding=1, bias=bias)\n\n def forward(self, x):\n x = self.proj(x)\n\n return x"
},
{
"identifier": "Key_TransformerBlock",
"path": "models/utils/module.py",
"snippet": "class Key_TransformerBlock(nn.Module):\n \"\"\"\n x: B,C,H,W key: B,K,C\n return: B,C,H,W\n process: MDTA + GDFN\n params: dim, num_heads, ffn_expansion_factor, bias:True/false LayerNorm_type: BiasFree/WithBias\n Adopted:: Restormer\n \"\"\"\n def __init__(self, dim, dimkey, num_heads, ffn_expansion_factor, bias, LayerNorm_type, principle=True, sam=False, ops_type=4, pred=False):\n super(Key_TransformerBlock, self).__init__()\n\n self.normkey = nn.LayerNorm(dimkey, elementwise_affine=False)\n self.norm1 = LayerNorm(dim, LayerNorm_type)\n self.attn = Key_Attention(dim, dimkey, num_heads, bias)\n self.norm2 = LayerNorm(dim, LayerNorm_type)\n self.ffn = DW_Mlp(dim, ffn_expansion_factor, bias)\n self.sam = sam\n self.principle = principle\n if principle:\n self.principle = PrincipleNet(dim=dim, ops_type=ops_type,pred=pred)\n if sam:\n self.SAM = SAM(n_feat=dim, kernel_size=1, bias=bias)\n\n def forward(self, im_degra, key, resize_img=None,degra_type=None):\n if degra_type is None:\n weight = self.principle.pred(im_degra).squeeze()\n dynamic_S = weight @ key[1]\n if len(dynamic_S.shape)==1:\n dynamic_S = dynamic_S[None,:]\n dynamic_S = torch.stack(list(map(lambda x: torch.diag(x), dynamic_S)))\n key = key[0]@dynamic_S@key[2]\n\n if self.sam:\n degra_map, img = self.SAM(im_degra,resize_img)\n degra_map = self.attn(self.norm1(degra_map), self.normkey(key))\n else:\n degra_map = self.attn(self.norm1(im_degra), self.normkey(key))\n\n if self.principle:\n im_degra, pred = self.principle(im_degra,degra_map,degra_type=degra_type)\n else:\n im_degra = im_degra - degra_map*im_degra \n\n im_degra = im_degra + self.ffn(self.norm2(im_degra))\n\n if self.sam:\n return im_degra, img, pred\n else:\n return im_degra, None, pred"
},
{
"identifier": "PI_MLP_Mixer",
"path": "models/utils/module.py",
"snippet": "class PI_MLP_Mixer(nn.Module):\n \"\"\"\n pca_token: full_cat tokens after pca -> compact token B,K,C\n \"\"\"\n def __init__(self,dim,num_degra,keep_degra,ffn_expansion_factor=2.66,init='pca'):\n super(PI_MLP_Mixer,self).__init__()\n self.keep_degra = keep_degra\n self.init=init\n self.convU = nn.Conv2d(num_degra,int(num_degra//5),1,1,0)\n self.convV = nn.Conv2d(num_degra,int(num_degra//5),1,1,0)\n self.apply(self._init_weights)\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n \n def forward(self,U,V,B):\n U = self.convU(U)\n V = self.convV(V)\n U = U.squeeze().transpose(0,1)[None,:,:].expand(B,-1,-1)\n V = V.squeeze()[None,:,:].expand(B,-1,-1)\n return U,V"
},
{
"identifier": "process_USV",
"path": "models/utils/module.py",
"snippet": "def process_USV(pca_token):\n U,S,V = torch.svd(pca_token)\n U = U.transpose(1,2)\n U = rearrange(U, 'n k c -> (n k) c')\n U = U[None,:,:,None] # 1 nk, k, 1 -> 1 k, k, 1\n V = V.transpose(1,2)\n V =rearrange(V, 'n k c -> (n k) c')\n V = V[None,:,:,None] # 1 nk, c, 1 -> 1 k, c, 1\n return U,S,V"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torchvision.transforms import Resize
from pdb import set_trace as stx
from models.utils.arch_util import LayerNorm
from models.utils.transformerBCHW_util import Downsample, Upsample, MDTA_TransformerBlock, OverlapPatchEmbed_Keep
from models.utils.module import Key_TransformerBlock, PI_MLP_Mixer, process_USV
from einops import rearrange | 2,501 |
##########################################################################
class IDR_restormer(nn.Module):
def __init__(self,
inp_channels=3,
out_channels=3,
dim=48,
num_blocks=[4, 6, 6, 8],
num_refinement_blocks=4,
heads=[1, 2, 4, 8],
ffn_expansion_factor=2.66,
bias=False,
LayerNorm_type='WithBias', ## Other option 'BiasFree'
num_degra_queries = 24,
keep_degra = 48,
degra_type = 5,
sam = True,
ops_type = 5,
pred = True
):
super(IDR_restormer, self).__init__()
self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}
self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)
self.encoder_level1 = nn.Sequential(*[
MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,
LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
|
##########################################################################
class IDR_restormer(nn.Module):
def __init__(self,
inp_channels=3,
out_channels=3,
dim=48,
num_blocks=[4, 6, 6, 8],
num_refinement_blocks=4,
heads=[1, 2, 4, 8],
ffn_expansion_factor=2.66,
bias=False,
LayerNorm_type='WithBias', ## Other option 'BiasFree'
num_degra_queries = 24,
keep_degra = 48,
degra_type = 5,
sam = True,
ops_type = 5,
pred = True
):
super(IDR_restormer, self).__init__()
self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}
self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)
self.encoder_level1 = nn.Sequential(*[
MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,
LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
| self.down1_2 = Downsample(dim) ## From Level 1 to Level 2 | 1 | 2023-12-07 10:58:34+00:00 | 4k |
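# --- Illustrative sketch (editor's addition, not part of the dataset record above) ---
# The Downsample/Upsample snippets in the record above describe "conv + pixel-unshuffle"
# (B,C,H,W -> B,2C,H/2,W/2) and "conv + pixel-shuffle" (B,C,H,W -> B,C/2,2H,2W).
# The stand-alone check below re-states that shape arithmetic with PyTorch; the
# feature count and input size are arbitrary.
import torch
import torch.nn as nn

n_feat = 48
down = nn.Sequential(nn.Conv2d(n_feat, n_feat // 2, 3, 1, 1, bias=False), nn.PixelUnshuffle(2))
up = nn.Sequential(nn.Conv2d(n_feat, n_feat * 2, 3, 1, 1, bias=False), nn.PixelShuffle(2))

x = torch.randn(1, n_feat, 32, 32)
print(down(x).shape)  # torch.Size([1, 96, 16, 16]) -> channels doubled, spatial halved
print(up(x).shape)    # torch.Size([1, 24, 64, 64]) -> channels halved, spatial doubled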
TACJu/Compositor | Compositor_Mask2Former/mask2former/modeling/criterion.py | [
{
"identifier": "is_dist_avail_and_initialized",
"path": "Compositor_Mask2Former/mask2former/utils/misc.py",
"snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True"
},
{
"identifier": "nested_tensor_from_tensor_list",
"path": "Compositor_Mask2Former/mask2former/utils/misc.py",
"snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], : img.shape[2]] = False\n else:\n raise ValueError(\"not supported\")\n return NestedTensor(tensor, mask)"
}
] | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list | 1,682 | classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(-1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_masks
dice_loss_jit = torch.jit.script(
dice_loss
) # type: torch.jit.ScriptModule
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
return loss.mean(1).sum() / num_masks
sigmoid_ce_loss_jit = torch.jit.script(
sigmoid_ce_loss
) # type: torch.jit.ScriptModule
def calculate_uncertainty(logits):
"""
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
assert logits.shape[1] == 1
gt_class_logits = logits.clone()
return -(torch.abs(gt_class_logits))
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,
num_points, oversample_ratio, importance_sample_ratio):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
def loss_labels(self, outputs, targets, indices, num_masks):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"].float()
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_masks(self, outputs, targets, indices, num_masks):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
| # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
MaskFormer criterion.
"""
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(-1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_masks
dice_loss_jit = torch.jit.script(
dice_loss
) # type: torch.jit.ScriptModule
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
return loss.mean(1).sum() / num_masks
sigmoid_ce_loss_jit = torch.jit.script(
sigmoid_ce_loss
) # type: torch.jit.ScriptModule
def calculate_uncertainty(logits):
"""
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
assert logits.shape[1] == 1
gt_class_logits = logits.clone()
return -(torch.abs(gt_class_logits))
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,
num_points, oversample_ratio, importance_sample_ratio):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
def loss_labels(self, outputs, targets, indices, num_masks):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"].float()
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_masks(self, outputs, targets, indices, num_masks):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss | target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() | 1 | 2023-12-12 11:49:28+00:00 | 4k |
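# --- Illustrative sketch (editor's addition, not part of the dataset record above) ---
# A stand-alone re-statement of the dice loss defined in this record: apply a
# sigmoid, flatten each mask, and score 1 - (2*intersection + 1)/(|pred| + |gt| + 1),
# averaged over num_masks. Unlike the original (whose targets arrive pre-flattened),
# both tensors are flattened here; all shapes and values are invented.
import torch

def dice_loss_sketch(inputs: torch.Tensor, targets: torch.Tensor, num_masks: float) -> torch.Tensor:
    inputs = inputs.sigmoid().flatten(1)
    targets = targets.flatten(1)
    numerator = 2 * (inputs * targets).sum(-1)
    denominator = inputs.sum(-1) + targets.sum(-1)
    return (1 - (numerator + 1) / (denominator + 1)).sum() / num_masks

_pred = torch.randn(3, 1, 8, 8)                # 3 predicted masks (logits)
_gt = (torch.rand(3, 1, 8, 8) > 0.5).float()   # 3 binary ground-truth masks
print(dice_loss_sketch(_pred, _gt, num_masks=3.0))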
Mirascope/mirascope | mirascope/cli/commands.py | [
{
"identifier": "MirascopeCommand",
"path": "mirascope/enums.py",
"snippet": "class MirascopeCommand(_Enum):\n \"\"\"CLI commands to be executed.\"\"\"\n\n ADD = \"add\"\n USE = \"use\"\n STATUS = \"status\"\n INIT = \"init\""
},
{
"identifier": "CURRENT_REVISION_KEY",
"path": "mirascope/cli/constants.py",
"snippet": "CURRENT_REVISION_KEY = \"CURRENT_REVISION\""
},
{
"identifier": "LATEST_REVISION_KEY",
"path": "mirascope/cli/constants.py",
"snippet": "LATEST_REVISION_KEY = \"LATEST_REVISION\""
},
{
"identifier": "MirascopeSettings",
"path": "mirascope/cli/schemas.py",
"snippet": "class MirascopeSettings(BaseModel):\n \"\"\"Model for the user's mirascope settings.\"\"\"\n\n mirascope_location: str\n versions_location: str\n prompts_location: str\n version_file_name: str\n\n model_config = ConfigDict(extra=\"forbid\")"
},
{
"identifier": "check_status",
"path": "mirascope/cli/utils.py",
"snippet": "def check_status(\n mirascope_settings: MirascopeSettings, directory: str\n) -> Optional[str]:\n \"\"\"Checks the status of the given directory.\"\"\"\n version_directory_path = mirascope_settings.versions_location\n prompt_directory_path = mirascope_settings.prompts_location\n version_file_name = mirascope_settings.version_file_name\n prompt_directory = os.path.join(version_directory_path, directory)\n used_prompt_path = f\"{prompt_directory_path}/{directory}.py\"\n\n # Get the currently used prompt version\n versions = get_prompt_versions(f\"{prompt_directory}/{version_file_name}\")\n if versions is None:\n return used_prompt_path\n current_head = versions.current_revision\n if current_head is None:\n return used_prompt_path\n current_version_prompt_path = find_prompt_path(prompt_directory, current_head)\n\n # Check if users prompt matches the current prompt version\n has_file_changed = check_prompt_changed(\n current_version_prompt_path, used_prompt_path\n )\n if has_file_changed:\n return used_prompt_path\n return None"
},
{
"identifier": "find_prompt_path",
"path": "mirascope/cli/utils.py",
"snippet": "def find_prompt_path(directory, prefix):\n \"\"\"Finds and opens the prompt with the given directory.\"\"\"\n pattern = os.path.join(directory, prefix + \"*.py\")\n prompt_files = glob.glob(pattern)\n\n if not prompt_files:\n return None # No files found\n\n # Return first file found\n return prompt_files[0]"
},
{
"identifier": "get_prompt_versions",
"path": "mirascope/cli/utils.py",
"snippet": "def get_prompt_versions(version_file_path: str) -> VersionTextFile:\n \"\"\"Returns the versions of the given prompt.\"\"\"\n versions = VersionTextFile()\n try:\n with open(version_file_path, \"r\", encoding=\"utf-8\") as file:\n file.seek(0)\n for line in file:\n # Check if the current line contains the key\n if line.startswith(CURRENT_REVISION_KEY + \"=\"):\n versions.current_revision = line.split(\"=\")[1].strip()\n elif line.startswith(LATEST_REVISION_KEY + \"=\"):\n versions.latest_revision = line.split(\"=\")[1].strip()\n return versions\n except FileNotFoundError:\n return versions"
},
{
"identifier": "get_user_mirascope_settings",
"path": "mirascope/cli/utils.py",
"snippet": "def get_user_mirascope_settings(\n ini_file_path: str = \"mirascope.ini\",\n) -> MirascopeSettings:\n \"\"\"Returns the user's mirascope settings.\"\"\"\n config = ConfigParser()\n config.read(ini_file_path)\n return MirascopeSettings(**config[\"mirascope\"])"
},
{
"identifier": "update_version_text_file",
"path": "mirascope/cli/utils.py",
"snippet": "def update_version_text_file(version_file: str, updates: dict):\n \"\"\"Updates the version text file.\"\"\"\n try:\n modified_lines = []\n edits_made = {\n key: False for key in updates\n } # Track which keys already exist in the file\n version_file_path: Path = Path(version_file)\n if not version_file_path.is_file():\n version_file_path.touch()\n # Read the file and apply updates\n with open(version_file_path, \"r\", encoding=\"utf-8\") as file:\n for line in file:\n # Check if the current line contains any of the keys\n for key, value in updates.items():\n if line.startswith(key + \"=\"):\n modified_lines.append(f\"{key}={value}\\n\")\n edits_made[key] = True\n break\n else:\n # No key found, so keep the line as is\n modified_lines.append(line)\n\n # Add any keys that were not found at the end of the file\n for key, value in updates.items():\n if not edits_made[key]:\n modified_lines.append(f\"{key}={value}\\n\")\n\n # Write the modified content back to the file\n with open(version_file_path, \"w\", encoding=\"utf-8\") as file:\n file.writelines(modified_lines)\n except FileNotFoundError:\n print(f\"The file {version_file} was not found.\")\n except IOError as e:\n print(f\"An I/O error occurred: {e}\")"
},
{
"identifier": "write_prompt_to_template",
"path": "mirascope/cli/utils.py",
"snippet": "def write_prompt_to_template(\n file: str,\n command: Literal[MirascopeCommand.ADD, MirascopeCommand.USE],\n variables: Optional[dict] = None,\n):\n \"\"\"Writes the given prompt to the template.\"\"\"\n mirascope_directory = get_user_mirascope_settings().mirascope_location\n if variables is None:\n variables = {}\n template_loader = FileSystemLoader(searchpath=mirascope_directory)\n template_env = Environment(loader=template_loader)\n template = template_env.get_template(\"prompt_template.j2\")\n analyzer = PromptAnalyzer()\n tree = ast.parse(file)\n analyzer.visit(tree)\n if command == MirascopeCommand.ADD:\n new_variables = variables | analyzer.variables\n else: # command == MirascopeCommand.USE\n variables = dict.fromkeys(ignore_variables, None)\n new_variables = {\n k: analyzer.variables[k] for k in analyzer.variables if k not in variables\n }\n\n data = {\n \"comments\": analyzer.comments,\n \"variables\": new_variables,\n \"imports\": analyzer.imports,\n \"from_imports\": analyzer.from_imports,\n \"classes\": analyzer.classes,\n }\n return template.render(**data)"
}
] | import os
from importlib.resources import files
from pathlib import Path
from jinja2 import Template
from ..enums import MirascopeCommand
from .constants import CURRENT_REVISION_KEY, LATEST_REVISION_KEY
from .schemas import MirascopeSettings
from .utils import (
check_status,
find_prompt_path,
get_prompt_versions,
get_user_mirascope_settings,
update_version_text_file,
write_prompt_to_template,
) | 2,103 | """Commands for Mirascope CLI.
This module contains the commands for the Mirascope CLI. The commands are add, status,
use, and init. See the documentation for each command for more information.
"""
# TODO: Add something like Typer to make commands easier to implement
def add(args) -> None:
"""Adds the given prompt to the specified version directory.
The contents of the prompt in the user's prompts directory are copied to the version
directory with the next revision number, and the version file is updated with the
new revision.
Args:
args: The command line arguments for the `add` command, containing:
- `prompt`: The name of the prompt to add.
Raises:
FileNotFoundError: If the file is not found in the specified prompts directory.
"""
mirascope_settings = get_user_mirascope_settings()
version_directory_path = mirascope_settings.versions_location
prompt_directory_path = mirascope_settings.prompts_location
version_file_name = mirascope_settings.version_file_name
directory_name: str = args.prompt
# Check status before continuing
used_prompt_path = check_status(mirascope_settings, directory_name)
if not used_prompt_path:
print("No changes detected.")
return
class_directory = os.path.join(version_directory_path, directory_name)
# Create version directory if it doesn't exist
if not os.path.exists(class_directory):
os.makedirs(class_directory)
version_file_path = os.path.join(class_directory, version_file_name)
versions = get_prompt_versions(version_file_path)
# Open user's prompt file
with open(
f"{prompt_directory_path}/{directory_name}.py", "r+", encoding="utf-8"
) as file:
# Increment revision id
if versions.latest_revision is None:
# first revision
revision_id = "0001"
else:
# default branch with incrementation
latest_revision_id = versions.latest_revision
revision_id = f"{int(latest_revision_id)+1:04}"
# Create revision file
with open(
f"{class_directory}/{revision_id}_{directory_name}.py",
"w+",
encoding="utf-8",
) as file2:
custom_variables = {
"prev_revision_id": versions.current_revision,
"revision_id": revision_id,
}
file2.write(
write_prompt_to_template(
file.read(), MirascopeCommand.ADD, custom_variables
)
)
keys_to_update = {
CURRENT_REVISION_KEY: revision_id,
LATEST_REVISION_KEY: revision_id,
}
| """Commands for Mirascope CLI.
This module contains the commands for the Mirascope CLI. The commands are add, status,
use, and init. See the documentation for each command for more information.
"""
# TODO: Add something like Typer to make commands easier to implement
def add(args) -> None:
"""Adds the given prompt to the specified version directory.
The contents of the prompt in the user's prompts directory are copied to the version
directory with the next revision number, and the version file is updated with the
new revision.
Args:
args: The command line arguments for the `add` command, containing:
- `prompt`: The name of the prompt to add.
Raises:
FileNotFoundError: If the file is not found in the specified prompts directory.
"""
mirascope_settings = get_user_mirascope_settings()
version_directory_path = mirascope_settings.versions_location
prompt_directory_path = mirascope_settings.prompts_location
version_file_name = mirascope_settings.version_file_name
directory_name: str = args.prompt
# Check status before continuing
used_prompt_path = check_status(mirascope_settings, directory_name)
if not used_prompt_path:
print("No changes detected.")
return
class_directory = os.path.join(version_directory_path, directory_name)
# Create version directory if it doesn't exist
if not os.path.exists(class_directory):
os.makedirs(class_directory)
version_file_path = os.path.join(class_directory, version_file_name)
versions = get_prompt_versions(version_file_path)
# Open user's prompt file
with open(
f"{prompt_directory_path}/{directory_name}.py", "r+", encoding="utf-8"
) as file:
# Increment revision id
if versions.latest_revision is None:
# first revision
revision_id = "0001"
else:
# default branch with incrementation
latest_revision_id = versions.latest_revision
revision_id = f"{int(latest_revision_id)+1:04}"
# Create revision file
with open(
f"{class_directory}/{revision_id}_{directory_name}.py",
"w+",
encoding="utf-8",
) as file2:
custom_variables = {
"prev_revision_id": versions.current_revision,
"revision_id": revision_id,
}
file2.write(
write_prompt_to_template(
file.read(), MirascopeCommand.ADD, custom_variables
)
)
keys_to_update = {
CURRENT_REVISION_KEY: revision_id,
LATEST_REVISION_KEY: revision_id,
} | update_version_text_file(version_file_path, keys_to_update) | 8 | 2023-12-05 01:22:34+00:00 | 4k |
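# --- Illustrative sketch (editor's addition, not part of the dataset record above) ---
# The `add` command above derives the next revision id by zero-padding an
# incremented integer ("0001", "0002", ...). The hypothetical helper below
# isolates just that rule.
from typing import Optional

def next_revision_id(latest_revision: Optional[str]) -> str:
    if latest_revision is None:
        return "0001"  # first revision
    return f"{int(latest_revision) + 1:04}"

# next_revision_id(None) -> "0001"; next_revision_id("0009") -> "0010"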
Prismadic/magnet | magnet/ron/llm.py | [
{
"identifier": "_f",
"path": "magnet/utils/globals.py",
"snippet": "def _f(\n tag: str = None,\n body: any = None,\n no_print: bool = False,\n luxe: bool = False\n):\n \"\"\"\n The `_f` function is a logging utility that prints messages with different tags and colors based on\n the provided parameters.\n\n :param tag: The `tag` parameter is a string that represents the tag for the log message. It can be\n one of the following values: \"FATAL\", \"WARN\", \"INFO\", \"WAIT\", or \"SUCCESS\"\n :type tag: str\n :param body: The `body` parameter is used to specify the message or content that you want to\n display. It can be of any type\n :type body: any\n :param no_print: The `no_print` parameter is a boolean flag that determines whether the output\n should be printed or returned as a string.\n the formatted string without printing it. If `no_print` is set to `False` (default)\n :type no_print: bool (optional)\n :param luxe: The `luxe` parameter is a boolean flag that determines whether to use a more luxurious\n and colorful output format. If `luxe` is set to `True`, the output will include random colors,\n emojis, and matrix-like characters.\n :type luxe: bool (optional)\n :return: The function `_f` returns a formatted string if the `no_print` parameter is set to `True`.\n If `no_print` is `False`, the function prints the formatted string and returns `None`.\n \"\"\"\n tags = [\n (\"FATAL\", \"☠️\", \"\\033[91m\"), # Red color for FATAL\n (\"WARN\", \"🚨\", \"\\033[93m\"), # Yellow color for WARN\n (\"INFO\", \"ℹ️\", \"\\033[94m\"), # Blue color for INFO\n (\"WAIT\", \"☕️\", \"\\033[96m\"), # Cyan color for WAIT\n (\"SUCCESS\", \"🌊\", \"\\033[92m\"), # Green color for SUCCESS\n ]\n _luxe = [\n \"\\033[31m\",\n \"\\033[32m\",\n \"\\033[33m\",\n \"\\033[34m\",\n \"\\033[35m\",\n \"\\033[36m\",\n \"\\033[91m\",\n \"\\033[92m\",\n \"\\033[93m\",\n \"\\033[94m\",\n \"\\033[95m\",\n \"\\033[96m\",\n ]\n _matrix = [\"⣾\", \"⣽\", \"⣻\", \"⢿\", \"⡿\", \"⣟\", \"⣯\", \"⣷\"]\n _joy = [\n \"🍤\",\n \"🌈\",\n \"📊\",\n \"🏁\",\n \"🌊\",\n \"🧠\",\n \"✨\",\n \"🧮\",\n \"🎉\",\n \"🥳\",\n \"🤩\",\n \"🐈\",\n \"❤️\",\n \"💙\",\n \"💜\",\n \"💚\",\n \"💛\",\n \"🧡\",\n \"⭐️\",\n ]\n matching_tags = [x for x in tags if x[0] == tag.upper()]\n if matching_tags:\n tag_text = matching_tags[0][0]\n emoji = matching_tags[0][1]\n color_code = matching_tags[0][2]\n if luxe:\n return (\n f\"{_luxe[random.randint(0,len(_luxe)-1)]} {_joy[random.randint(0,len(_joy)-1)]} {_matrix[random.randint(0,len(_matrix)-1)]}: {body}\\033[0m\"\n if no_print\n else print(\n f\"{_luxe[random.randint(0,len(_luxe)-1)]} {_joy[random.randint(0,len(_joy)-1)]} {_matrix[random.randint(0,len(_matrix)-1)]}: {body}\\033[0m\"\n )\n )\n else:\n return (\n f\"{color_code} {emoji} {tag_text}: {body}\\033[0m\"\n if no_print\n else print(f\"{color_code}{emoji} {tag_text}: {body}\\033[0m\")\n )\n else:\n print(f\"😭 UNKNOWN TAG - `{tag}`\")"
},
{
"identifier": "InferenceAPI",
"path": "magnet/utils/huggingface.py",
"snippet": "class InferenceAPI:\n \"\"\"\n A class that provides a convenient way to make HTTP POST requests to an inference API endpoint.\n\n Attributes:\n token (str): A string representing the token used for authorization.\n \"\"\"\n\n def __init__(self, token):\n \"\"\"\n Initializes the InferenceAPI class with a token.\n\n Args:\n token (str): A string representing the token used for authorization.\n \"\"\"\n self.token = token\n \n def invoke(self, payload):\n \"\"\"\n Makes an HTTP POST request to an inference API endpoint and returns the response.\n\n Args:\n payload (str): A JSON string representing the payload to be sent to the inference API. It should contain the model name and input data.\n\n Returns:\n str: A JSON string representing the response from the inference API.\n \"\"\"\n payload = json.loads(payload)\n headers = {\"Authorization\": f\"Bearer {self.token}\"}\n response = requests.post(f\"https://api-inference.huggingface.co/models/{payload['model']}\", headers=headers, json=payload)\n return response.json()"
},
{
"identifier": "LocalInference",
"path": "magnet/utils/local.py",
"snippet": "class LocalInference:\n def __init__(self, model):\n \"\"\"\n Initializes the LocalInference class with a pre-trained model.\n\n Args:\n model (str): The path to the pre-trained model used for inference.\n \"\"\"\n self.model = model\n \n def invoke(self, payload):\n \"\"\"\n Invokes a local inference using a pre-trained model.\n\n Args:\n payload (str): A JSON string containing the input parameters for the inference.\n\n Returns:\n str: The generated response from the local inference.\n \"\"\"\n payload = json.loads(payload)\n payload = {\n \"seed\": 2077,\n \"model_path\": self.model,\n \"temp\": payload[\"parameters\"][\"temperature\"],\n \"prompt\": payload[\"prompt\"],\n \"max_tokens\": payload[\"parameters\"][\"max_new_tokens\"],\n \"tokens_per_eval\": 10\n }\n response = mistral.generate(payload)\n return response"
}
] | from magnet.utils.globals import _f
from magnet.utils.huggingface import InferenceAPI
from magnet.utils.local import LocalInference
from magnet.utils.prompts import *
from magnet.utils.data_classes import *
import requests, json | 2,240 |
class Generate:
def __init__(self, server: str = None, field = None, hf_token: str = None):
"""
Initializes the Generate class.
Args:
server (str): The URL of the server to be used for generating the response. Default is None.
field: Placeholder field that can be used for future implementation.
hf_token (str): The Hugging Face token to be used for authentication when using the local inference API. Default is None.
"""
self.server = server if not hf_token else None
self.field = field
self.token = hf_token
async def on(self):
"""
Placeholder method that can be used for future implementation.
"""
if self.field:
pass # todo
async def ask(self
, m: str = "mistralai/Mistral-7B-Instruct-v0.1"
, q: str = "What is your itinerary?"
, t: float = 1.0
, n: int = 8096
, p: str = "qa_ref"
, cb: object = None
, docs: list = []
, v: bool = False
):
"""
Generates a response based on a given prompt using a language model.
Args:
m (str): The model name or identifier to be used for generating the response. Default is "mistralai/Mistral-7B-Instruct-v0.1".
q (str): The question or prompt for which a response is to be generated. Default is "What is your itinerary?".
t (float): The temperature parameter controlling the randomness of the generated response. Default is 1.0.
n (int): The maximum number of new tokens to be generated in the response. Default is 8096.
p (str): The type of prompt to be used for generating the response. Default is "qa_ref".
cb (object): An optional callback function to be executed with the generated response. Default is None.
docs (list): A list of additional context or documents to be used for generating the response. Default is an empty list.
v (bool): A flag indicating whether to use the server for generating the response. Default is False.
Returns:
str: The generated response.
Raises:
Exception: If an error occurs during the execution of the method.
"""
prompt = getattr(globals()['Prompts'](), p)(docs,q)
_f('warn', '(p + q + d) > n') if len(prompt) > n else None
payload = json.dumps({
"model": m,
"prompt": prompt,
"inputs": prompt,
"parameters": {
"max_new_tokens": n
, "temperature": t,
}
})
headers = {
'Content-Type': 'application/json'
}
if self.token:
|
class Generate:
def __init__(self, server: str = None, field = None, hf_token: str = None):
"""
Initializes the Generate class.
Args:
server (str): The URL of the server to be used for generating the response. Default is None.
field: Placeholder field that can be used for future implementation.
hf_token (str): The Hugging Face token to be used for authentication when using the local inference API. Default is None.
"""
self.server = server if not hf_token else None
self.field = field
self.token = hf_token
async def on(self):
"""
Placeholder method that can be used for future implementation.
"""
if self.field:
pass # todo
async def ask(self
, m: str = "mistralai/Mistral-7B-Instruct-v0.1"
, q: str = "What is your itinerary?"
, t: float = 1.0
, n: int = 8096
, p: str = "qa_ref"
, cb: object = None
, docs: list = []
, v: bool = False
):
"""
Generates a response based on a given prompt using a language model.
Args:
m (str): The model name or identifier to be used for generating the response. Default is "mistralai/Mistral-7B-Instruct-v0.1".
q (str): The question or prompt for which a response is to be generated. Default is "What is your itinerary?".
t (float): The temperature parameter controlling the randomness of the generated response. Default is 1.0.
n (int): The maximum number of new tokens to be generated in the response. Default is 8096.
p (str): The type of prompt to be used for generating the response. Default is "qa_ref".
cb (object): An optional callback function to be executed with the generated response. Default is None.
docs (list): A list of additional context or documents to be used for generating the response. Default is an empty list.
v (bool): A flag indicating whether to use the server for generating the response. Default is False.
Returns:
str: The generated response.
Raises:
Exception: If an error occurs during the execution of the method.
"""
prompt = getattr(globals()['Prompts'](), p)(docs,q)
_f('warn', '(p + q + d) > n') if len(prompt) > n else None
payload = json.dumps({
"model": m,
"prompt": prompt,
"inputs": prompt,
"parameters": {
"max_new_tokens": n
, "temperature": t,
}
})
headers = {
'Content-Type': 'application/json'
}
if self.token: | llm = InferenceAPI(self.token) | 1 | 2023-12-12 14:11:21+00:00 | 4k |
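# --- Illustrative sketch (editor's addition, not part of the dataset record above) ---
# The InferenceAPI snippet in this record posts a JSON payload with a bearer
# token to the hosted Hugging Face inference endpoint. The fragment below
# re-states that call; the token, model name and prompt are placeholders.
import requests

def ask_hf_sketch(token: str, model: str, prompt: str, max_new_tokens: int = 256, temperature: float = 1.0):
    headers = {"Authorization": f"Bearer {token}"}
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": max_new_tokens, "temperature": temperature},
    }
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{model}", headers=headers, json=payload
    )
    return response.json()

# ask_hf_sketch("hf_...", "mistralai/Mistral-7B-Instruct-v0.1", "What is your itinerary?")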
9tailwolf/Racing-Line-Optimization | classes/TrackPlotter.py | [
{
"identifier": "Vector",
"path": "classes/Vector.py",
"snippet": "class Vector:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return 'Vector(' + str(self.x) + ',' + str(self.y) + ')'\n\n def __mul__(self, other):\n return Vector(self.x * other, self.y * other)\n\n def __add__(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n return Vector(self.x - other.x, self.y - other.y)\n\n def __truediv__(self, other):\n return Vector(self.x / other, self.y / other)\n\n def size(self):\n return (self.x ** 2 + self.y ** 2) ** (1 / 2)\n\n def norm(self):\n size = self.size()\n return Vector(round(self.x / size, 3), round(self.y / size, 3))\n\n def right(self):\n return Vector(self.y, -self.x)\n\n def left(self):\n return Vector(-self.y, self.x)\n\n def crossproduct(self, other):\n return self.x * other.y - self.y * other.x\n\n def rotate(self, theta):\n new_x = self.x * math.cos(theta) - self.y * math.sin(theta)\n new_y = self.x * math.sin(theta) + self.y * math.cos(theta)\n\n return Vector(new_x,new_y)\n\n\n def degree(self, other):\n inner_product = self.x * other.x + self.y * other.y\n size = self.size() * other.size()\n cos = inner_product / size\n if cos > 1:\n cos = 1\n if cos < -1:\n cos = -1\n degree = math.acos(cos)\n return degree"
},
{
"identifier": "Optimizer",
"path": "classes/Optimizer.py",
"snippet": "class Optimizer:\n def __init__(self,rx,ry,lx,ly,norm,node_size:int,max_velocity:int,min_velocity:int,velocity_interval:int,max_theta:int,theta_interval:int):\n '''\n Tuned value for limit physical movements.\n It is recommended to use the above values s fixed values.\n Surely, there is a better tuned value. But it is hard to find.\n '''\n self.centripetal_force_limiter = 3000\n self.degree_limiter = 10\n self.accelation_degree_limiter = 3\n\n rx_temp = rx.copy()\n ry_temp = ry.copy()\n lx_temp = lx.copy()\n ly_temp = ly.copy()\n\n self.data = {'rx':rx_temp, 'ry':ry_temp, 'lx':lx_temp, 'ly':ly_temp}\n self.length = len(rx)\n\n self.norm = norm\n self.node_size = node_size\n self.max_velocity = max_velocity\n self.min_velocity = min_velocity\n self.velocity_interval = velocity_interval\n self.max_theta = max_theta\n self.theta_interval = theta_interval # odd\n\n for k in self.data.keys():\n self.data[k].append(self.data[k][-1])\n\n def get_velocity(self,i):\n if i<0 or i>= self.velocity_interval:\n raise 'Velocity Error'\n\n return (i) * (self.max_velocity-self.min_velocity) / (self.velocity_interval-1) + self.min_velocity\n\n def get_theta(self,i):\n if i<0 or i>= self.theta_interval:\n raise 'Theta Error'\n\n return (i * (self.max_theta * 2 / self.theta_interval - 1) - self.max_theta) / 180 * math.pi\n\n\n def memoization(self):\n if self.node_size<3:\n raise 'Size Error'\n\n dp = [[[[math.inf for _ in range(self.theta_interval)] for _ in range(self.velocity_interval)] for _ in range(self.node_size)] for _ in range(self.length)]\n\n dp_posx, dp_posy = [], []\n for i in range(self.length+1):\n x = [-1 for _ in range(self.node_size)]\n y = [-1 for _ in range(self.node_size)]\n x[0],x[-1],y[0],y[-1] = self.data['rx'][i],self.data['lx'][i],self.data['ry'][i],self.data['ly'][i]\n\n gap_x = (self.data['rx'][i] - self.data['lx'][i]) / (self.node_size - 1)\n gap_y = (self.data['ry'][i] - self.data['ly'][i]) / (self.node_size - 1)\n\n for i in range(1,self.node_size-1):\n x[i] = x[i-1] - gap_x\n y[i] = y[i-1] - gap_y\n\n dp_posx.append(x)\n dp_posy.append(y)\n\n self.dp_posx = dp_posx\n self.dp_posy = dp_posy\n\n\n return dp\n\n def optimization(self):\n dp = self.memoization()\n selected = [[[[-1 for _ in range(self.theta_interval)] for _ in range(self.velocity_interval)] for _ in range(self.node_size)] for _ in range(self.length)]\n\n for node in range(self.node_size):\n for v in range(self.velocity_interval):\n for t in range(self.theta_interval):\n dp[0][node][v][t] = 0\n\n for i in tqdm(range(1,self.length)):\n time.sleep(10**-6)\n for node in range(self.node_size): # current position\n for v in range(self.velocity_interval): # current speed\n for t in range(self.theta_interval): # current angle\n for pre_node in range(self.node_size): # previous position\n for pre_v in range(self.velocity_interval): # previous speed\n for pre_t in range(self.theta_interval): # previous angle\n if dp[i-1][pre_node][pre_v][pre_t] == math.inf:\n pass\n\n if dp[i][node][v][t] > dp[i-1][pre_node][pre_v][pre_t] + self.cost(i,node,v,t,i-1,pre_node,pre_v,pre_t):\n selected[i][node][v][t] = (pre_node,pre_v,pre_t)\n dp[i][node][v][t] = dp[i-1][pre_node][pre_v][pre_t] + self.cost(i,node,v,t,i-1,pre_node,pre_v,pre_t)\n\n\n res = math.inf\n result_pos = []\n result_vector = None\n\n for node in range(self.node_size):\n for v in range(self.velocity_interval):\n for t in range(self.theta_interval):\n if res > dp[-1][node][v][t]:\n res = dp[-1][node][v][t]\n result_vector = selected[i][node][v][t]\n\n 
result_pos.append(result_vector[0])\n\n for i in range(self.length - 2,0,-1):\n result_vector = selected[i][result_vector[0]][result_vector[1]][result_vector[2]]\n result_pos.append(result_vector[0])\n\n result_pos.reverse()\n result_posx, result_posy = [],[]\n for i in range(len(result_pos)):\n result_posx.append(self.dp_posx[i][result_pos[i]])\n result_posy.append(self.dp_posy[i][result_pos[i]])\n return result_posx, result_posy\n\n\n def cost(self, i1, node1, v1, t1, i2, node2, v2, t2):\n '''\n acceleration penelty\n '''\n if abs(v1 - v2)>1:\n return math.inf\n\n '''\n delta time\n '''\n v1vector = self.norm[i1].rotate(self.get_theta(t1)) * self.get_velocity(v1)\n v2vector = self.norm[i2].rotate(self.get_theta(t2)) * self.get_velocity(v2)\n avgv = (v1vector + v2vector) / 2\n ds = Vector(self.dp_posx[i1][node1] - self.dp_posx[i2][node2], self.dp_posy[i1][node1] - self.dp_posy[i2][node2])\n dt = ds.size() / avgv.size()\n if min(ds.degree(avgv) / math.pi * 180, 180 - ds.degree(avgv)/ math.pi * 180) > self.degree_limiter:\n return math.inf\n\n\n '''\n centripetal force penalty\n '''\n r = ds.size() / (v1vector.degree(v2vector) / math.pi * 180 + 10**-6)\n centripetal_accelation = avgv.size()**2 / r\n if centripetal_accelation > self.centripetal_force_limiter:\n return math.inf\n\n '''\n accelation penalty\n '''\n accelation = abs(v1vector.size() ** 2 - v2vector.size() ** 2) / (2 * ds.size())\n if accelation > 0 and min(ds.degree(avgv) / math.pi * 180, 180 - ds.degree(avgv)/ math.pi * 180) > self.accelation_degree_limiter:\n return math.inf\n\n return dt"
}
] | import matplotlib.pyplot as plt
import pandas as pd
from classes.Vector import Vector
from classes.Optimizer import Optimizer | 2,386 |
class TrackPlotter:
def __init__(self, circuit):
self.circuit = circuit
self.track = pd.read_csv('./tracks/' + self.circuit + '.csv')
self.track.columns = ['x','y','r','l']
self.make_track()
def make_track(self):
right_x, right_y, left_x, left_y,vector = [], [], [], [],[]
for i in range(self.track.shape[0]):
if i == self.track.shape[0] - 1:
|
class TrackPlotter:
def __init__(self, circuit):
self.circuit = circuit
self.track = pd.read_csv('./tracks/' + self.circuit + '.csv')
self.track.columns = ['x','y','r','l']
self.make_track()
def make_track(self):
right_x, right_y, left_x, left_y,vector = [], [], [], [],[]
for i in range(self.track.shape[0]):
if i == self.track.shape[0] - 1: | v = Vector(self.track['x'][0] - self.track['x'][i], self.track['y'][0] - self.track['y'][i]) | 0 | 2023-12-12 13:35:42+00:00 | 4k |
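# --- Illustrative sketch (editor's addition, not part of the dataset record above) ---
# The Vector snippet in this record rotates a 2-D vector with the standard
# rotation matrix (x*cos(t) - y*sin(t), x*sin(t) + y*cos(t)). The few lines
# below check that arithmetic on a unit vector; the values are arbitrary.
import math

def rotate_sketch(x, y, theta):
    return (x * math.cos(theta) - y * math.sin(theta),
            x * math.sin(theta) + y * math.cos(theta))

_rx, _ry = rotate_sketch(1.0, 0.0, math.pi / 2)
print(round(_rx, 6), round(_ry, 6))  # 0.0 1.0 -> a quarter turn maps (1,0) onto (0,1)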
pdelboca/django-dcat | dcat/management/commands/import_from_datajson.py | [
{
"identifier": "Catalog",
"path": "dcat/models.py",
"snippet": "class Catalog(models.Model):\n \"\"\"A catalogue that hosts the Datasets or Data Services being described.\"\"\"\n\n # Mandatory properties\n title = models.CharField(max_length=255, help_text=\"A name given to the Catalogue.\")\n description = models.TextField(help_text=\"A free-text account of the Catalogue.\")\n publisher = models.ForeignKey(\n \"Agent\",\n on_delete=models.CASCADE,\n help_text=\"An entity (organisation) responsible for making the Catalogue available.\",\n )\n\n # Recommended properties\n licence = models.ForeignKey(\n \"LicenceDocument\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n help_text=\"A licence under which the Catalogue can be used or reused.\",\n )\n themes = models.ManyToManyField(\n \"DataTheme\",\n blank=True,\n help_text=\"A knowledge organization system used to classify the Catalogue's Datasets.\",\n )\n homepage = models.URLField(\n blank=True, help_text=\"A web page that acts as the main page for the Catalogue.\"\n )\n\n def __str__(self):\n return self.title"
},
{
"identifier": "Dataset",
"path": "dcat/models.py",
"snippet": "class Dataset(models.Model):\n \"\"\"A conceptual entity that represents the information published.\"\"\"\n\n # Mandatory properties\n title = models.CharField(max_length=255, help_text=\"A name given to the Dataset.\")\n catalog = models.ForeignKey(\"Catalog\", on_delete=models.CASCADE)\n\n # Recommended properties\n description = models.TextField(\n blank=True, help_text=\"A free-text account of the Dataset.\"\n )\n publisher = models.ForeignKey(\n \"Agent\",\n on_delete=models.SET_NULL,\n null=True,\n help_text=\"An entity (organisation) responsible for making the Dataset available.\",\n )\n themes = models.ManyToManyField(\n \"DataTheme\",\n blank=True,\n help_text=\"A category of the Dataset. A Dataset may be associated with multiple themes.\",\n )\n keywords = models.ManyToManyField(\n \"Keyword\", blank=True, help_text=\"A keyword or tag describing the Dataset.\"\n )\n\n # Optional properties\n modified = models.DateField(\n blank=True,\n null=True,\n help_text=\"The most recent date on which the Dataset was changed or modified.\",\n )\n issued = models.DateField(blank=True, null=True)\n landing_page = models.URLField(\n blank=True,\n help_text=\"A web page that provides access to the Dataset, its Distributions and/or additional information. It is intended to point to a landing page at the original data provider, not to a page on a site of a third party, such as an aggregator.\",\n )\n\n def __str__(self):\n return self.title"
},
{
"identifier": "Distribution",
"path": "dcat/models.py",
"snippet": "class Distribution(models.Model):\n \"\"\"A physical embodiment of the Dataset in a particular format.\n\n Example: A CSV file, an RDF file, etc.\n \"\"\"\n\n def _get_storage_path(instance, filename):\n \"\"\"Return the storage path of the file.\n\n The OS can complain if we store thousands of files in\n the same directory. So I'm inventing something to avoid\n this problem.\n \"\"\"\n return f\"files/datasets/{instance.dataset.pk}/{filename}\"\n\n # Mandatory properties\n dataset = models.ForeignKey(\"Dataset\", on_delete=models.CASCADE)\n\n @property\n def access_url(self):\n \"\"\"Return the access url of the file.\n\n If the file is hosted in another portal, the access_url is provided\n in the distribution. Otherwise, the access_url is the URL to the\n distribution.\n \"\"\"\n pass\n\n # Recomened properties\n title = models.CharField(\n max_length=255, blank=True, help_text=\"A name given to the Distribution.\"\n )\n description = models.TextField(\n blank=True, help_text=\"A free-text account of the Distribution.\"\n )\n file = models.FileField(upload_to=_get_storage_path, blank=True)\n format = models.ForeignKey(\n \"MediaType\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n help_text=\"The file format of the Distribution.\",\n )\n licence = models.ForeignKey(\n \"LicenceDocument\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n help_text=\"A licence under which the Distribution is made available. \",\n )\n\n external_download_url = models.URLField(\n blank=True,\n default=\"\",\n help_text=\"A URL that is a direct link to a downloadable file in a given format.\",\n )\n external_access_url = models.URLField(\n blank=True,\n default=\"\",\n help_text=\"A URL that gives access to a Distribution of the Dataset. The resource at the access URL may contain information about how to get the Dataset.\",\n )\n\n # Optional properties\n checksum = models.OneToOneField(\n \"Checksum\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n help_text=\"A mechanism that can be used to verify that the contents of a distribution have not changed. The checksum is related to the download_url.\",\n )\n\n @property\n def download_url(self):\n \"\"\"Return the download url of the file.\n\n If the file is hosted in another portal, the download_url is provided\n in the distribution. Otherwise, the download_url is generated from the\n file field.\n\n This field is not mandatory so it can return an empty string (a distrubution\n can contain only an access_url.)\n \"\"\"\n if self.external_download_url:\n return self.external_download_url\n if self.file:\n return self.file.url\n return \"\"\n\n def calculate_md5_checksum(self):\n \"\"\"Calculates the md5 checksum of the file.\"\"\"\n md5_hash = hashlib.md5()\n with self.file.open(mode=\"rb\") as f:\n while chunk := f.read(4096):\n md5_hash.update(chunk)\n return md5_hash.hexdigest()\n\n def __str__(self):\n return self.title"
},
{
"identifier": "Agent",
"path": "dcat/models.py",
"snippet": "class Agent(models.Model):\n \"\"\"Any entity carrying out actions with respect to the (Core) entities.\n\n Example: Publishers, Creators, etc.\n \"\"\"\n\n # Mandatory properties\n name = models.CharField(max_length=255, help_text=\"A name of the Agent.\")\n\n # Recommended properties\n type = models.CharField(\n max_length=20,\n blank=True,\n help_text=\"A type of the agent that makes the Catalogue or Dataset available.\",\n )\n\n # Optional properties\n mbox = models.EmailField(\n blank=True, null=True, help_text=\"An email address of the Agent.\"\n )\n\n def __str__(self):\n return self.name"
},
{
"identifier": "MediaType",
"path": "dcat/models.py",
"snippet": "class MediaType(models.Model):\n \"\"\"A set of media types from the DCAT-AP vocabulary.\"\"\"\n\n extension = models.CharField(max_length=10)\n code = models.CharField(max_length=10, unique=True, blank=True, null=True)\n media_type = models.CharField(max_length=50, blank=True)\n description = models.TextField(blank=True)\n\n def __str__(self):\n return self.extension"
},
{
"identifier": "LicenceDocument",
"path": "dcat/models.py",
"snippet": "class LicenceDocument(models.Model):\n \"\"\"A set of licences from the DCAT-AP vocabulary.\"\"\"\n\n # Recommended properties\n @property\n def type(self):\n return self.url_general or self.label\n\n label = models.CharField(max_length=255)\n code = models.CharField(max_length=10, unique=True, blank=True, null=True)\n url_general = models.URLField(blank=True, default=\"\")\n url_document = models.URLField(blank=True, default=\"\")\n\n def __str__(self):\n return self.label"
},
{
"identifier": "DataTheme",
"path": "dcat/models.py",
"snippet": "class DataTheme(models.Model):\n \"\"\"Themes used for dataset classification.\n\n This is the mandatory controlled vocabulary for\n the themeTaxonomy field of Catalogue. (As defined\n by DCAT-AP).\n\n https://op.europa.eu/s/y52L\n \"\"\"\n\n code = models.CharField(max_length=255, unique=True)\n label = models.CharField(max_length=255)\n description = models.TextField(blank=True, default=\"\")\n\n def __str__(self):\n return self.label"
},
{
"identifier": "Keyword",
"path": "dcat/models.py",
"snippet": "class Keyword(models.Model):\n \"\"\"A keyword or tag describing the Dataset.\"\"\"\n\n name = models.CharField(max_length=50)\n slug = models.SlugField()\n\n def __str__(self):\n return self.name"
}
] | import json
import pathlib
from os import listdir
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from dcat.models import (
Catalog,
Dataset,
Distribution,
Agent,
MediaType,
LicenceDocument,
DataTheme,
Keyword,
) | 2,765 |
class Command(BaseCommand):
help = "Import data from a DCAT-US file provided by ckanext-datajson."
def _get_content_file(self, dataset, distribution, datapath="data"):
"""Returns a ContentFile to be added to the django model.
Requires the following structure:
- datapath/
- {dataset_identifier}/
- {distribution_identifier}/
- some-file.csv
"""
file_folder = (
f'{datapath}/{dataset.get("identifier")}/{distribution.get("identifier")}'
)
file = None
try:
local_file_name = listdir(file_folder)[0]
file_path = f"{file_folder}/{local_file_name}"
file = ContentFile(
open(file_path, mode="rb").read(), name=distribution.get("fileName")
)
except IndexError:
msg = f'{distribution.get("identifier")} folder does not have a file'
self.stdout.write(self.style.ERROR(msg))
return file
def add_arguments(self, parser):
parser.add_argument(
"--file", type=open, help="Path to the data.json file", default="data.json"
)
parser.add_argument(
"--datapath",
type=pathlib.Path,
help="Path to the data folder",
default="data",
)
def handle(self, *args, **options):
datapath = options.get("datapath")
if not datapath.exists():
msg = f"{datapath} path to data does not exist."
self.stdout.write(self.style.ERROR(msg))
return
with options.get("file") as file:
data = json.load(file)
title = data.get("title")
description = data.get("description")
publisher, _ = Agent.objects.get_or_create(
name=data.get("publisher").get("name"),
mbox=data.get("publisher").get("mbox", ""),
)
catalog_licence, _ = LicenceDocument.objects.get_or_create(
label=data.get("license")
)
catalog = Catalog.objects.create(
title=title,
description=description,
publisher=publisher,
licence=catalog_licence,
)
for theme in data.get("themeTaxonomy", []):
theme_id = theme.get("id")
theme_label = theme.get("label")
theme_description = theme.get("description")
theme_obj, _ = DataTheme.objects.get_or_create(
code=theme_id,
label=theme_label,
description=theme_description,
)
catalog.themes.add(theme_obj)
# Import Datasets
datasets = data.get("dataset")
for dataset in datasets:
dataset_info = {}
dataset_info["title"] = dataset.get("title")
dataset_info["description"] = dataset.get("description")
dataset_info["publisher"], _ = Agent.objects.get_or_create(
name=dataset.get("publisher").get("name"),
mbox=dataset.get("publisher").get("mbox", ""),
)
dataset_info["catalog"] = catalog
|
class Command(BaseCommand):
help = "Import data from a DCAT-US file provided by ckanext-datajson."
def _get_content_file(self, dataset, distribution, datapath="data"):
"""Returns a ContentFile to be added to the django model.
Requires the following structure:
- datapath/
- {dataset_identifier}/
- {distribution_identifier}/
- some-file.csv
"""
file_folder = (
f'{datapath}/{dataset.get("identifier")}/{distribution.get("identifier")}'
)
file = None
try:
local_file_name = listdir(file_folder)[0]
file_path = f"{file_folder}/{local_file_name}"
file = ContentFile(
open(file_path, mode="rb").read(), name=distribution.get("fileName")
)
except IndexError:
msg = f'{distribution.get("identifier")} folder does not have a file'
self.stdout.write(self.style.ERROR(msg))
return file
def add_arguments(self, parser):
parser.add_argument(
"--file", type=open, help="Path to the data.json file", default="data.json"
)
parser.add_argument(
"--datapath",
type=pathlib.Path,
help="Path to the data folder",
default="data",
)
def handle(self, *args, **options):
datapath = options.get("datapath")
if not datapath.exists():
msg = f"{datapath} path to data does not exist."
self.stdout.write(self.style.ERROR(msg))
return
with options.get("file") as file:
data = json.load(file)
title = data.get("title")
description = data.get("description")
publisher, _ = Agent.objects.get_or_create(
name=data.get("publisher").get("name"),
mbox=data.get("publisher").get("mbox", ""),
)
catalog_licence, _ = LicenceDocument.objects.get_or_create(
label=data.get("license")
)
catalog = Catalog.objects.create(
title=title,
description=description,
publisher=publisher,
licence=catalog_licence,
)
for theme in data.get("themeTaxonomy", []):
theme_id = theme.get("id")
theme_label = theme.get("label")
theme_description = theme.get("description")
theme_obj, _ = DataTheme.objects.get_or_create(
code=theme_id,
label=theme_label,
description=theme_description,
)
catalog.themes.add(theme_obj)
# Import Datasets
datasets = data.get("dataset")
for dataset in datasets:
dataset_info = {}
dataset_info["title"] = dataset.get("title")
dataset_info["description"] = dataset.get("description")
dataset_info["publisher"], _ = Agent.objects.get_or_create(
name=dataset.get("publisher").get("name"),
mbox=dataset.get("publisher").get("mbox", ""),
)
dataset_info["catalog"] = catalog | dataset_created = Dataset.objects.create(**dataset_info) | 1 | 2023-12-10 17:26:39+00:00 | 4k |
ebb-earl-co/tidal-wave | tidal_wave/login.py | [
{
"identifier": "BearerAuth",
"path": "tidal_wave/models.py",
"snippet": "class BearerAuth(AuthBase):\n \"\"\"A class to be passed to the `auth` argument in a `requests.Session`\n constructor\"\"\"\n\n def __init__(self, token: str):\n self.token = token\n\n def __call__(self, r):\n r.headers[\"Authorization\"] = f\"Bearer {self.token}\"\n return r"
},
{
"identifier": "SessionsEndpointResponseJSON",
"path": "tidal_wave/models.py",
"snippet": "class SessionsEndpointResponseJSON(dataclass_wizard.JSONWizard):\n session_id: str # UUID4 value, really\n user_id: int\n country_code: str # 2-digit country code according to some ISO\n channel_id: int\n partner_id: int\n client: \"Client\""
},
{
"identifier": "TOKEN_DIR_PATH",
"path": "tidal_wave/oauth.py",
"snippet": "TOKEN_DIR_PATH: Path = user_config_path() / PROJECT_NAME"
},
{
"identifier": "BearerToken",
"path": "tidal_wave/oauth.py",
"snippet": "class BearerToken:\n \"\"\"This class represents a token used in bearer authentication\n (https://swagger.io/docs/specification/authentication/bearer-authentication/)\n with the Tidal API.\"\"\"\n\n access_token: str = field(repr=False)\n client_name: str # \"TIDAL_Android_2.38.0_Fire_TV_Atmos\"\n expiration: Union[str, datetime] = field(repr=False)\n refresh_token: str = field(repr=False)\n user_id: int\n user_name: str\n\n def __post_init__(self):\n self.client_id, self.client_secret = (\n TidalOauth().client_id,\n TidalOauth().client_secret,\n )\n if isinstance(self.expiration, str):\n try:\n self.expiration = datetime.fromisoformat(self.expiration)\n except ValueError:\n raise TokenException(\n \"Expiration must be a datetime or datetime-like str\"\n )\n\n @property\n def is_expired(self) -> bool:\n \"\"\"Returns whether self.expiration is in the past, by comparing with\n datetime.datetime.now(tz=datetime.timezone.utc).\"\"\"\n return False if datetime.now(tz=timezone.utc) < self.expiration else True\n\n def save(self, p: Path = TOKEN_DIR_PATH / \"fire_tv-tidal.token\"):\n \"\"\"Write some attributes as base64-encoded JSON to path on disk, p\"\"\"\n d: Dict[str, str] = {\n \"access_token\": self.access_token,\n \"client_name\": self.client_name,\n \"expiration\": self.expiration.isoformat(),\n \"refresh_token\": self.refresh_token,\n \"user_id\": self.user_id,\n \"user_name\": self.user_name,\n }\n outdata: bytes = base64.b64encode(json.dumps(d).encode(\"UTF-8\"))\n p.write_bytes(outdata)\n\n @classmethod\n def load(\n cls, p: Path = TOKEN_DIR_PATH / \"fire_tv-tidal.token\"\n ) -> Optional[\"BearerToken\"]:\n \"\"\"Read base64-encoded JSON object from disk. If no error arises,\n return a BearerToken instance; else, return None\"\"\"\n\n try:\n data = json.loads(base64.b64decode(p.read_bytes()))\n except FileNotFoundError:\n logger.exception(\n TokenException(f\"File '{str(p.absolute())}' does not exist\")\n )\n return\n except json.JSONDecodeError:\n logger.exception(\n TokenException(f\"Could not parse JSON data from '{str(p.absolute())}'\")\n )\n return\n except UnicodeDecodeError:\n logger.exception(\n TokenException(\n f\"File '{str(p.absolute())}' does not appear to be base64-encoded\"\n )\n )\n return\n\n data_args = (\n data.get(a)\n for a in (\n \"access_token\",\n \"client_name\",\n \"expiration\",\n \"refresh_token\",\n \"user_id\",\n \"user_name\",\n )\n )\n\n _token: BearerToken = cls(*data_args)\n return _token\n\n def refresh(self):\n \"\"\"If self.access_token is expired, go through the token refresh process:\n https://oauth.net/2/refresh-tokens/. 
If successful, various attributes of\n self are overwritten: most importantly, self.expiration & self.access_token\n \"\"\"\n _data = {\n \"client_id\": self.client_id,\n \"refresh_token\": self.refresh_token,\n \"grant_type\": \"refresh_token\",\n \"scope\": \"r_usr+w_usr+w_sub\",\n }\n _auth = (self.client_id, self.client_secret)\n with requests.post(\n url=f\"{OAUTH2_URL}/token\", data=_data, auth=_auth, headers=OAUTH2_HEADERS\n ) as resp:\n try:\n resp.raise_for_status()\n except requests.HTTPError:\n raise TokenException(\n f\"Could not refresh bearer token: HTTP error code {resp.status_code}\"\n )\n else:\n token_json = resp.json()\n\n self.access_token = token_json.get(\"access_token\")\n if token_json.get(\"clientName\", token_json.get(\"client_name\")) is not None:\n self.client_name = token_json.get(\n \"clientName\", token_json.get(\"client_name\")\n )\n if token_json.get(\"userId\", token_json.get(\"user_id\")) is not None:\n self.user_id = token_json.get(\"userId\", token_json.get(\"user_id\"))\n if token_json.get(\"userName\", token_json.get(\"user_name\")) is not None:\n self.user_name = token_json.get(\"userName\", token_json.get(\"user_name\"))\n\n _timedelta = timedelta(seconds=token_json.get(\"expires_in\") - 300)\n self.expiration = datetime.now(tz=timezone.utc) + _timedelta"
},
{
"identifier": "TidalOauth",
"path": "tidal_wave/oauth.py",
"snippet": "class TidalOauth:\n \"\"\"This class encapsulates attributes and methods to do with authenticating\n with the Tidal OAuth API. In particular, the authorization_code_flow()\n method implements the authorization code flow part of the OAuth 2.0\n specification:\n https://auth0.com/docs/get-started/authentication-and-authorization-flow/authorization-code-flow\n The client_id and client_secret attributes are gleaned from other projects'\n work, especially\n https://github.com/Dniel97/RedSea/blob/4ba02b88cee33aeb735725cb854be6c66ff372d4/config/settings.example.py#L68\n \"\"\"\n\n def __post_init__(self):\n self._client_id: str = \"7m7Ap0JC9j1cOM3n\"\n self._client_secret: str = \"vRAdA108tlvkJpTsGZS8rGZ7xTlbJ0qaZ2K9saEzsgY=\"\n self.token: Optional[BearerToken] = None\n self.verification_url: Optional[str] = None\n\n @property\n def client_id(self) -> str:\n return self._client_id\n\n @property\n def client_secret(self) -> str:\n return self._client_secret\n\n def post_device_authorization(self, headers: Dict[str, str] = OAUTH2_HEADERS):\n \"\"\"Send a POST request to the /device_authorization endpoint of Tidal's\n authentication API. If error, raises AuthorizationException. Else,\n return an DeviceAuthorizationEndpointResponseJSON instance with five\n attributes:\n device_code, user_code, verification_uri_complete, expires_in, interval\n \"\"\"\n _url: str = f\"{OAUTH2_URL}/device_authorization\"\n _data: Dict[str, str] = {\n \"client_id\": self.client_id,\n \"scope\": \"r_usr+w_usr+w_sub\",\n }\n with requests.post(url=_url, data=_data, headers=headers) as resp:\n try:\n resp.raise_for_status()\n except requests.HTTPError as he:\n raise AuthorizationException(he.args[0])\n\n daerj = DeviceAuthorizationEndpointResponseJSON.from_dict(resp.json())\n\n self.device_authorization = daerj\n self.verification_url: str = f\"http://{daerj.verification_uri_complete}\"\n # \"Date\" header is in the \"%a, %d %b %Y %H:%M:%S %Z\" format:\n # e.g. \"Wed, 06 Dec 2023 05:11:11 GMT\".\n # So, parsedate_to_datetime converts the above into\n # datetime.datetime(2023, 12, 6, 5, 11, 11, tzinfo=datetime.timezone.utc)\n self.verification_expiration: datetime = parsedate_to_datetime(\n resp.headers.get(\"Date\")\n ) + timedelta(seconds=daerj.expires_in)\n\n def authorization_code_flow(\n self, headers: Dict[str, str] = OAUTH2_HEADERS\n ) -> BearerToken:\n \"\"\"Returns an instance of BearerToken by authenticating\n with the Tidal OAuth 2.0 API /token endpoint. Upon error,\n raises AuthorizationException\"\"\"\n _data = {\n \"client_id\": self.client_id,\n \"grant_type\": \"urn:ietf:params:oauth:grant-type:device_code\",\n \"scope\": \"r_usr+w_usr+w_sub\",\n }\n if self.verification_url is None:\n self.post_device_authorization()\n\n _data[\"device_code\"] = self.device_authorization.device_code\n _auth = (self.client_id, self.client_secret)\n\n print(\n \"\\nCopy this URL, then navigate to it in a browser: \"\n f\"{self.verification_url}\\n\",\n file=sys.stderr,\n )\n\n while datetime.now(tz=timezone.utc) < self.verification_expiration:\n with requests.post(\n url=f\"{OAUTH2_URL}/token\", headers=headers, data=_data, auth=_auth\n ) as resp:\n if not resp.ok:\n time.sleep(self.device_authorization.interval * 2)\n continue\n else:\n break\n else:\n raise AuthorizationException(\n \"OAuth login process has timed out. 
Please try again.\"\n )\n\n logger.info(\"Successfully authenticated with Tidal API.\")\n\n _token = TokenEndpointResponseJSON.from_dict(resp.json())\n if _token.token_type.lower() == \"bearer\":\n return BearerToken(\n access_token=_token.access_token,\n client_name=_token.client_name,\n expiration=_token.expiration,\n refresh_token=_token.refresh_token,\n user_id=_token.user_id,\n user_name=_token.user.username,\n )\n else:\n raise TokenException(\n f\"Expected a bearer token, but received token type: {_token.token_type}\"\n )"
},
{
"identifier": "TokenException",
"path": "tidal_wave/oauth.py",
"snippet": "class TokenException(Exception):\n pass"
},
{
"identifier": "TIDAL_API_URL",
"path": "tidal_wave/utils.py",
"snippet": "TIDAL_API_URL: str = \"https://api.tidal.com/v1\""
}
] | import base64
import json
import logging
import sys
import requests
import typer
from enum import Enum
from pathlib import Path
from typing import Dict, Optional, Set, Tuple
from .models import BearerAuth, SessionsEndpointResponseJSON
from .oauth import (
TOKEN_DIR_PATH,
BearerToken,
TidalOauth,
TokenException,
)
from .utils import TIDAL_API_URL | 3,227 |
COMMON_HEADERS: Dict[str, str] = {"Accept-Encoding": "gzip, deflate, br"}
logger = logging.getLogger(__name__)
class AudioFormat(str, Enum):
sony_360_reality_audio = "360"
dolby_atmos = "Atmos"
hi_res = "HiRes"
mqa = "MQA"
lossless = "Lossless"
high = "High"
low = "Low"
class LogLevel(str, Enum):
debug = "DEBUG" # 10
info = "INFO" # 20
warning = "WARNING" # 30
error = "ERROR" # 40
critical = "CRITICAL" # 50
def load_token_from_disk(
token_path: Path = TOKEN_DIR_PATH / "android-tidal.token",
) -> Optional[str]:
"""Attempt to read `token_path` from disk and decoded its contents
as JSON"""
if not token_path.exists():
logger.warning(f"FileNotFoundError: {str(token_path.absolute())}")
return
    token_file_contents: bytes = token_path.read_bytes()
decoded_token_file_contents: str = base64.b64decode(token_file_contents).decode(
"utf-8"
)
try:
bearer_token_json: dict = json.loads(decoded_token_file_contents)
except json.decoder.JSONDecodeError:
logger.warning(f"File '{token_path.absolute()}' cannot be parsed as JSON")
return
else:
return bearer_token_json.get("access_token")
def validate_token(
token: str, headers: Dict[str, str] = COMMON_HEADERS
) -> Optional[requests.Session]:
"""Send a GET request to the /sessions endpoint of Tidal's API.
If `token` is valid, use the SessionsEndpointResponseJSON object
that was returned from the API to create a requests.Session object with
some additional attributes. Otherwise, return None"""
auth_headers: Dict[str, str] = {**headers, "Authorization": f"Bearer {token}"}
sess: Optional[requests.Session] = None
with requests.get(url=f"{TIDAL_API_URL}/sessions", headers=auth_headers) as r:
try:
r.raise_for_status()
except requests.HTTPError as h:
if r.status_code == 401:
logger.error("Token is not authorized")
return sess
else:
logger.exception(h)
return sess
serj = SessionsEndpointResponseJSON.from_dict(r.json())
logger.debug("Adding data from API reponse to session object:")
logger.debug(serj)
sess: requests.Session = requests.Session()
sess.headers: Dict[str, str] = headers
|
COMMON_HEADERS: Dict[str, str] = {"Accept-Encoding": "gzip, deflate, br"}
logger = logging.getLogger(__name__)
class AudioFormat(str, Enum):
sony_360_reality_audio = "360"
dolby_atmos = "Atmos"
hi_res = "HiRes"
mqa = "MQA"
lossless = "Lossless"
high = "High"
low = "Low"
class LogLevel(str, Enum):
debug = "DEBUG" # 10
info = "INFO" # 20
warning = "WARNING" # 30
error = "ERROR" # 40
critical = "CRITICAL" # 50
def load_token_from_disk(
token_path: Path = TOKEN_DIR_PATH / "android-tidal.token",
) -> Optional[str]:
"""Attempt to read `token_path` from disk and decoded its contents
as JSON"""
if not token_path.exists():
logger.warning(f"FileNotFoundError: {str(token_path.absolute())}")
return
    token_file_contents: bytes = token_path.read_bytes()
decoded_token_file_contents: str = base64.b64decode(token_file_contents).decode(
"utf-8"
)
try:
bearer_token_json: dict = json.loads(decoded_token_file_contents)
except json.decoder.JSONDecodeError:
logger.warning(f"File '{token_path.absolute()}' cannot be parsed as JSON")
return
else:
return bearer_token_json.get("access_token")
def validate_token(
token: str, headers: Dict[str, str] = COMMON_HEADERS
) -> Optional[requests.Session]:
"""Send a GET request to the /sessions endpoint of Tidal's API.
If `token` is valid, use the SessionsEndpointResponseJSON object
that was returned from the API to create a requests.Session object with
some additional attributes. Otherwise, return None"""
auth_headers: Dict[str, str] = {**headers, "Authorization": f"Bearer {token}"}
sess: Optional[requests.Session] = None
with requests.get(url=f"{TIDAL_API_URL}/sessions", headers=auth_headers) as r:
try:
r.raise_for_status()
except requests.HTTPError as h:
if r.status_code == 401:
logger.error("Token is not authorized")
return sess
else:
logger.exception(h)
return sess
serj = SessionsEndpointResponseJSON.from_dict(r.json())
logger.debug("Adding data from API reponse to session object:")
logger.debug(serj)
sess: requests.Session = requests.Session()
sess.headers: Dict[str, str] = headers | sess.auth: BearerAuth = BearerAuth(token=token) | 0 | 2023-12-12 21:50:25+00:00 | 4k |