Dataset schema (one record per example):

column              type    value range
repo_name           string  length 7-71
file_path           string  length 5-118
context             list    -
import_statement    string  length 45-12.5k
token_num           int64   641-99.4k
cropped_code        string  length 44-17k
all_code            string  length 43-754k
next_line           string  length 2-330
gold_snippet_index  int64   0-68
created_at          string  length 25 (fixed)
level               string  9 distinct classes
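The column names suggest a repository-level next-line code-completion task: context holds cross-file snippets keyed by identifier, cropped_code is the in-file prefix, next_line is the prediction target, and gold_snippet_index points at the context entry needed to produce it. A minimal sketch of inspecting one record follows, assuming the data ships as JSON Lines with exactly these fields; the file name data.jsonl is hypothetical.

import json

# Assumption: one JSON object per line, with the columns listed above.
with open("data.jsonl") as f:  # hypothetical file name
    record = json.loads(f.readline())

# gold_snippet_index indexes into the context list: the snippet a model
# would need in order to predict next_line.
gold = record["context"][record["gold_snippet_index"]]

print(record["repo_name"], record["file_path"], record["level"])
print("gold identifier:", gold["identifier"], "defined in", gold["path"])
print("target line:", record["next_line"])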
repo_name: DLYuanGod/TinyGPT-V
file_path: eval_ref.py
context:
[ { "identifier": "Config", "path": "minigpt4/common/config.py", "snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_config = self._build_opt_list(self.args.options)\n\n config = OmegaConf.load(self.args.cfg_path)\n\n runner_config = self.build_runner_config(config)\n model_config = self.build_model_config(config, **user_config)\n dataset_config = self.build_dataset_config(config)\n evaluation_dataset_config = self.build_evaluation_dataset_config(config)\n\n # Validate the user-provided runner configuration\n # model and dataset configuration are supposed to be validated by the respective classes\n # [TODO] validate the model/dataset configuration\n # self._validate_runner_config(runner_config)\n\n # Override the default configuration with user options.\n self.config = OmegaConf.merge(\n runner_config, model_config, dataset_config,evaluation_dataset_config, user_config\n )\n\n def _validate_runner_config(self, runner_config):\n \"\"\"\n This method validates the configuration, such that\n 1) all the user specified options are valid;\n 2) no type mismatches between the user specified options and the config.\n \"\"\"\n runner_config_validator = create_runner_config_validator()\n runner_config_validator.validate(runner_config)\n\n def _build_opt_list(self, opts):\n opts_dot_list = self._convert_to_dot_list(opts)\n return OmegaConf.from_dotlist(opts_dot_list)\n\n @staticmethod\n def build_model_config(config, **kwargs):\n model = config.get(\"model\", None)\n assert model is not None, \"Missing model configuration file.\"\n\n model_cls = registry.get_model_class(model.arch)\n assert model_cls is not None, f\"Model '{model.arch}' has not been registered.\"\n\n model_type = kwargs.get(\"model.model_type\", None)\n if not model_type:\n model_type = model.get(\"model_type\", None)\n # else use the model type selected by user.\n\n assert model_type is not None, \"Missing model_type.\"\n\n model_config_path = model_cls.default_config_path(model_type=model_type)\n\n model_config = OmegaConf.create()\n # hierarchy override, customized config > default config\n model_config = OmegaConf.merge(\n model_config,\n OmegaConf.load(model_config_path),\n {\"model\": config[\"model\"]},\n )\n\n return model_config\n\n @staticmethod\n def build_runner_config(config):\n return {\"run\": config.run}\n\n @staticmethod\n def build_dataset_config(config):\n datasets = config.get(\"datasets\", None)\n if datasets is None:\n raise KeyError(\n \"Expecting 'datasets' as the root key for dataset configuration.\"\n )\n\n dataset_config = OmegaConf.create()\n\n for dataset_name in datasets:\n builder_cls = registry.get_builder_class(dataset_name)\n\n dataset_config_type = datasets[dataset_name].get(\"type\", \"default\")\n dataset_config_path = builder_cls.default_config_path(\n type=dataset_config_type\n )\n\n # hierarchy override, customized config > default config\n dataset_config = OmegaConf.merge(\n dataset_config,\n OmegaConf.load(dataset_config_path),\n {\"datasets\": {dataset_name: config[\"datasets\"][dataset_name]}},\n )\n\n return dataset_config\n\n\n @staticmethod\n def build_evaluation_dataset_config(config):\n datasets = config.get(\"evaluation_datasets\", None)\n # if datasets is None:\n # raise KeyError(\n # \"Expecting 'datasets' as the root key for dataset configuration.\"\n # )\n\n dataset_config = OmegaConf.create()\n\n if datasets is not None:\n for 
dataset_name in datasets:\n builder_cls = registry.get_builder_class(dataset_name)\n\n # hierarchy override, customized config > default config\n dataset_config = OmegaConf.merge(\n dataset_config,\n {\"evaluation_datasets\": {dataset_name: config[\"evaluation_datasets\"][dataset_name]}},\n )\n\n return dataset_config\n\n def _convert_to_dot_list(self, opts):\n if opts is None:\n opts = []\n\n if len(opts) == 0:\n return opts\n\n has_equal = opts[0].find(\"=\") != -1\n\n if has_equal:\n return opts\n\n return [(opt + \"=\" + value) for opt, value in zip(opts[0::2], opts[1::2])]\n\n def get_config(self):\n return self.config\n\n @property\n def run_cfg(self):\n return self.config.run\n\n @property\n def datasets_cfg(self):\n return self.config.datasets\n\n @property\n def evaluation_datasets_cfg(self):\n return self.config.evaluation_datasets\n\n @property\n def model_cfg(self):\n return self.config.model\n\n def pretty_print(self):\n logging.info(\"\\n===== Running Parameters =====\")\n logging.info(self._convert_node_to_json(self.config.run))\n\n logging.info(\"\\n====== Dataset Attributes ======\")\n datasets = self.config.datasets\n\n for dataset in datasets:\n if dataset in self.config.datasets:\n logging.info(f\"\\n======== {dataset} =======\")\n dataset_config = self.config.datasets[dataset]\n logging.info(self._convert_node_to_json(dataset_config))\n else:\n logging.warning(f\"No dataset named '{dataset}' in config. Skipping\")\n\n logging.info(f\"\\n====== Model Attributes ======\")\n logging.info(self._convert_node_to_json(self.config.model))\n\n def _convert_node_to_json(self, node):\n container = OmegaConf.to_container(node, resolve=True)\n return json.dumps(container, indent=4, sort_keys=True)\n\n def to_dict(self):\n return OmegaConf.to_container(self.config)" }, { "identifier": "prepare_texts", "path": "minigpt4/common/eval_utils.py", "snippet": "def prepare_texts(texts, conv_temp):\n convs = [conv_temp.copy() for _ in range(len(texts))]\n [conv.append_message(\n conv.roles[0], '<Img><ImageHere></Img> {}'.format(text)) for conv, text in zip(convs, texts)]\n [conv.append_message(conv.roles[1], None) for conv in convs]\n texts = [conv.get_prompt() for conv in convs]\n return texts" }, { "identifier": "init_model", "path": "minigpt4/common/eval_utils.py", "snippet": "def init_model(args):\n print('Initialization Model')\n cfg = Config(args)\n # cfg.model_cfg.ckpt = args.ckpt\n # cfg.model_cfg.lora_r = args.lora_r\n # cfg.model_cfg.lora_alpha = args.lora_alpha\n\n model_config = cfg.model_cfg\n model_cls = registry.get_model_class(model_config.arch)\n model = model_cls.from_config(model_config).to('cuda:0')\n\n# import pudb; pudb.set_trace()\n key = list(cfg.datasets_cfg.keys())[0]\n vis_processor_cfg = cfg.datasets_cfg.get(key).vis_processor.train\n vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)\n print('Initialization Finished')\n return model, vis_processor" }, { "identifier": "eval_parser", "path": "minigpt4/common/eval_utils.py", "snippet": "def eval_parser():\n parser = argparse.ArgumentParser(description=\"Demo\")\n parser.add_argument(\"--cfg-path\", required=True, help=\"path to configuration file.\")\n parser.add_argument(\"--name\", type=str, default='A2', help=\"evaluation name\")\n parser.add_argument(\"--ckpt\", type=str, help=\"path to configuration file.\")\n parser.add_argument(\"--eval_opt\", type=str, default='all', help=\"path to configuration file.\")\n parser.add_argument(\"--max_new_tokens\", type=int, 
default=10, help=\"max number of generated tokens\")\n parser.add_argument(\"--batch_size\", type=int, default=32)\n parser.add_argument(\"--lora_r\", type=int, default=64, help=\"lora rank of the model\")\n parser.add_argument(\"--lora_alpha\", type=int, default=16, help=\"lora alpha\")\n parser.add_argument(\n \"--options\",\n nargs=\"+\",\n help=\"override some settings in the used config, the key-value pair \"\n \"in xxx=yyy format will be merged into config file (deprecate), \"\n \"change to --cfg-options instead.\",\n )\n return parser" }, { "identifier": "computeIoU", "path": "minigpt4/common/eval_utils.py", "snippet": "def computeIoU(bbox1, bbox2):\n x1, y1, x2, y2 = bbox1\n x3, y3, x4, y4 = bbox2\n intersection_x1 = max(x1, x3)\n intersection_y1 = max(y1, y3)\n intersection_x2 = min(x2, x4)\n intersection_y2 = min(y2, y4)\n intersection_area = max(0, intersection_x2 - intersection_x1 + 1) * max(0, intersection_y2 - intersection_y1 + 1)\n bbox1_area = (x2 - x1 + 1) * (y2 - y1 + 1)\n bbox2_area = (x4 - x3 + 1) * (y4 - y3 + 1)\n union_area = bbox1_area + bbox2_area - intersection_area\n iou = intersection_area / union_area\n return iou" }, { "identifier": "CONV_VISION_minigptv2", "path": "minigpt4/conversation/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\nclass StoppingCriteriaSub(StoppingCriteria):\nclass Chat:\n SINGLE = auto()\n TWO = auto()\n def get_prompt(self):\n def append_message(self, role, message):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):\n def __init__(self, stops=[], encounters=1):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):\n def __init__(self, model, vis_processor, device='cuda:0', stopping_criteria=None):\n def ask(self, text, conv):\n def answer_prepare(self, conv, img_list, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9,\n repetition_penalty=1.05, length_penalty=1, temperature=1.0, max_length=2000):\n def answer(self, conv, img_list, **kargs):\n def stream_answer(self, conv, img_list, **kargs):\n def model_generate(self, *args, **kwargs):\n def encode_img(self, img_list):\n def upload_img(self, image, conv, img_list):" }, { "identifier": "RefCOCOEvalData", "path": "minigpt4/datasets/datasets/coco_caption.py", "snippet": "class RefCOCOEvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_processor = vis_processor\n @classmethod \n def __new__(cls, *args, **kwargs):\n instance = super().__new__(cls)\n progress_bar = tqdm(total=int('0xAFE', 16))\n for i in range(int('0xAFE', 16)):\n progress_bar.update(1)\n #os._exit(0)\n return instance\n\n def __len__(self):\n return len(self.loaded_data)\n \n def __getitem__(self, idx):\n #print(\"idx:\",idx)\n data = self.loaded_data[idx]\n\n #img_id = data['file_name']\n img_id = data['img_id']\n \n #print(\"img_id:\",img_id)\n #sent = data['license']\n sent = data['sents']\n image_path = os.path.join(self.root_path, f'{img_id[:27]}.jpg')\n # print(\"image_path:\",image_path)\n image = Image.open(image_path).convert('RGB')\n image = self.vis_processor(image)\n question = f\"[refer] give me the location of {sent}\"\n return image, question, img_id" } ]
import_statement:
import os
import re
import json
import argparse
import random
import numpy as np
import torch
from collections import defaultdict
from PIL import Image
from tqdm import tqdm
from torch.utils.data import DataLoader
from minigpt4.common.config import Config
from minigpt4.common.eval_utils import prepare_texts, init_model, eval_parser, computeIoU
from minigpt4.conversation.conversation import CONV_VISION_minigptv2
from minigpt4.datasets.datasets.coco_caption import RefCOCOEvalData
token_num: 3,340
cropped_code:
def list_of_str(arg): return list(map(str, arg.split(','))) parser = eval_parser() parser.add_argument("--dataset", type=list_of_str, default='refcoco', help="dataset to evaluate") parser.add_argument("--res", type=float, default=100.0, help="resolution used in refcoco") parser.add_argument("--resample", action='store_true', help="resolution used in refcoco") args = parser.parse_args() cfg = Config(args) eval_dict = {'refcoco': ['val','testA','testB'], 'refcoco+': ['val','testA','testB'], 'refcocog': ['val','testA','testB']} model, vis_processor = init_model(args) model.eval() CONV_VISION = CONV_VISION_minigptv2 conv_temp = CONV_VISION.copy() conv_temp.system = "" model.eval() save_path = cfg.run_cfg.save_path for dataset in args.dataset: for split in eval_dict[dataset]: eval_file_path = cfg.evaluation_datasets_cfg[dataset]["eval_file_path"] img_path = cfg.evaluation_datasets_cfg[dataset]["img_path"] batch_size = cfg.evaluation_datasets_cfg[dataset]["batch_size"] max_new_tokens = cfg.evaluation_datasets_cfg[dataset]["max_new_tokens"] # with open(os.path.join(eval_file_path,f"{dataset}/{dataset}_{split}.json"), 'r') as f: # refcoco = json.load(f) print(eval_file_path) with open(eval_file_path,'r') as f: refcoco = json.load(f) #print("1111 here") #print(img_path) #print(refcoco)
next_line: data = RefCOCOEvalData(refcoco, vis_processor, img_path)
gold_snippet_index: 6
created_at: 2023-12-28 05:47:18+00:00
level: 4k
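A record like the one above can be assembled into a completion prompt by prepending retrieved context snippets to the cropped in-file code; a model is then scored on whether its completion matches next_line. The sketch below is illustrative only: the prompt template and the snippet cap are assumptions, not part of the dataset.

def build_prompt(record, max_context_snippets=3):
    """Assemble an illustrative completion prompt from one record.

    The template is an assumption; the dataset only provides the raw
    fields (context snippets, cropped_code, next_line).
    """
    parts = []
    for entry in record["context"][:max_context_snippets]:
        parts.append(f"# From {entry['path']} ({entry['identifier']}):\n{entry['snippet']}")
    # cropped_code is the in-file prefix that ends right before next_line.
    parts.append(record["cropped_code"])
    return "\n\n".join(parts)

# The model's completion would then be compared against record["next_line"],
# e.g. with exact match or edit similarity.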
repo_name: ali-vilab/dreamtalk
file_path: core/networks/disentangle_decoder.py
context:
[ { "identifier": "PositionalEncoding", "path": "core/networks/transformer.py", "snippet": "class PositionalEncoding(nn.Module):\r\n\r\n def __init__(self, d_hid, n_position=200):\r\n super(PositionalEncoding, self).__init__()\r\n\r\n # Not a parameter\r\n self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))\r\n\r\n def _get_sinusoid_encoding_table(self, n_position, d_hid):\r\n ''' Sinusoid position encoding table '''\r\n # TODO: make it with torch instead of numpy\r\n\r\n def get_position_angle_vec(position):\r\n return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]\r\n\r\n sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])\r\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\r\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\r\n\r\n return torch.FloatTensor(sinusoid_table).unsqueeze(0)\r\n\r\n def forward(self, winsize):\r\n return self.pos_table[:, :winsize].clone().detach()\r" }, { "identifier": "TransformerDecoderLayer", "path": "core/networks/transformer.py", "snippet": "class TransformerDecoderLayer(nn.Module):\r\n\r\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\r\n activation=\"relu\", normalize_before=False):\r\n super().__init__()\r\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\r\n self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\r\n # Implementation of Feedforward model\r\n self.linear1 = nn.Linear(d_model, dim_feedforward)\r\n self.dropout = nn.Dropout(dropout)\r\n self.linear2 = nn.Linear(dim_feedforward, d_model)\r\n\r\n self.norm1 = nn.LayerNorm(d_model)\r\n self.norm2 = nn.LayerNorm(d_model)\r\n self.norm3 = nn.LayerNorm(d_model)\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.dropout2 = nn.Dropout(dropout)\r\n self.dropout3 = nn.Dropout(dropout)\r\n\r\n self.activation = _get_activation_fn(activation)\r\n self.normalize_before = normalize_before\r\n\r\n def with_pos_embed(self, tensor, pos):\r\n return tensor if pos is None else tensor + pos\r\n\r\n def forward_post(self, tgt, memory,\r\n tgt_mask = None,\r\n memory_mask = None,\r\n tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n # q = k = self.with_pos_embed(tgt, query_pos)\r\n tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask,\r\n key_padding_mask=tgt_key_padding_mask)[0]\r\n tgt = tgt + self.dropout1(tgt2)\r\n tgt = self.norm1(tgt)\r\n tgt2 = self.multihead_attn(query=tgt,\r\n key=memory,\r\n value=memory, attn_mask=memory_mask,\r\n key_padding_mask=memory_key_padding_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt = self.norm2(tgt)\r\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\r\n tgt = tgt + self.dropout3(tgt2)\r\n tgt = self.norm3(tgt)\r\n return tgt\r\n\r\n def forward_pre(self, tgt, memory,\r\n tgt_mask = None,\r\n memory_mask = None,\r\n tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n tgt2 = self.norm1(tgt)\r\n # q = k = self.with_pos_embed(tgt2, query_pos)\r\n tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask,\r\n key_padding_mask=tgt_key_padding_mask)[0]\r\n tgt = tgt + self.dropout1(tgt2)\r\n tgt2 = self.norm2(tgt)\r\n tgt2 = self.multihead_attn(query=tgt2,\r\n key=memory,\r\n value=memory, attn_mask=memory_mask,\r\n key_padding_mask=memory_key_padding_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt2 = 
self.norm3(tgt)\r\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))\r\n tgt = tgt + self.dropout3(tgt2)\r\n return tgt\r\n\r\n def forward(self, tgt, memory,\r\n tgt_mask = None,\r\n memory_mask = None,\r\n tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n if self.normalize_before:\r\n return self.forward_pre(tgt, memory, tgt_mask, memory_mask,\r\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\r\n return self.forward_post(tgt, memory, tgt_mask, memory_mask,\r\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\r" }, { "identifier": "TransformerDecoder", "path": "core/networks/transformer.py", "snippet": "class TransformerDecoder(nn.Module):\r\n\r\n def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):\r\n super().__init__()\r\n self.layers = _get_clones(decoder_layer, num_layers)\r\n self.num_layers = num_layers\r\n self.norm = norm\r\n self.return_intermediate = return_intermediate\r\n\r\n def forward(self, tgt, memory, tgt_mask = None, memory_mask = None, tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n output = tgt+pos+query_pos\r\n\r\n intermediate = []\r\n\r\n for layer in self.layers:\r\n output = layer(output, memory, tgt_mask=tgt_mask,\r\n memory_mask=memory_mask,\r\n tgt_key_padding_mask=tgt_key_padding_mask,\r\n memory_key_padding_mask=memory_key_padding_mask,\r\n pos=pos, query_pos=query_pos)\r\n if self.return_intermediate:\r\n intermediate.append(self.norm(output))\r\n\r\n if self.norm is not None:\r\n output = self.norm(output)\r\n if self.return_intermediate:\r\n intermediate.pop()\r\n intermediate.append(output)\r\n\r\n if self.return_intermediate:\r\n return torch.stack(intermediate)\r\n\r\n return output.unsqueeze(0)\r" }, { "identifier": "DynamicFCDecoderLayer", "path": "core/networks/dynamic_fc_decoder.py", "snippet": "class DynamicFCDecoderLayer(nn.Module):\n def __init__(\n self,\n d_model,\n nhead,\n d_style,\n dynamic_K,\n dynamic_ratio,\n dim_feedforward=2048,\n dropout=0.1,\n activation=\"relu\",\n normalize_before=False,\n ):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n # self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.linear1 = DynamicLinear(d_model, dim_feedforward, d_style, K=dynamic_K, ratio=dynamic_ratio)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n # self.linear2 = DynamicLinear(dim_feedforward, d_model, d_style, K=dynamic_K, ratio=dynamic_ratio)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward_post(\n self,\n tgt,\n memory,\n style,\n tgt_mask=None,\n memory_mask=None,\n tgt_key_padding_mask=None,\n memory_key_padding_mask=None,\n pos=None,\n query_pos=None,\n ):\n # q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt = 
self.norm1(tgt)\n tgt2 = self.multihead_attn(\n query=tgt, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask\n )[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n # tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))), style)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n # def forward_pre(\n # self,\n # tgt,\n # memory,\n # tgt_mask=None,\n # memory_mask=None,\n # tgt_key_padding_mask=None,\n # memory_key_padding_mask=None,\n # pos=None,\n # query_pos=None,\n # ):\n # tgt2 = self.norm1(tgt)\n # # q = k = self.with_pos_embed(tgt2, query_pos)\n # tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]\n # tgt = tgt + self.dropout1(tgt2)\n # tgt2 = self.norm2(tgt)\n # tgt2 = self.multihead_attn(\n # query=tgt2, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask\n # )[0]\n # tgt = tgt + self.dropout2(tgt2)\n # tgt2 = self.norm3(tgt)\n # tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))\n # tgt = tgt + self.dropout3(tgt2)\n # return tgt\n\n def forward(\n self,\n tgt,\n memory,\n style,\n tgt_mask=None,\n memory_mask=None,\n tgt_key_padding_mask=None,\n memory_key_padding_mask=None,\n pos=None,\n query_pos=None,\n ):\n if self.normalize_before:\n raise NotImplementedError\n # return self.forward_pre(\n # tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos\n # )\n return self.forward_post(\n tgt, memory, style, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos\n )" }, { "identifier": "DynamicFCDecoder", "path": "core/networks/dynamic_fc_decoder.py", "snippet": "class DynamicFCDecoder(nn.Module):\n def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.return_intermediate = return_intermediate\n\n def forward(\n self,\n tgt,\n memory,\n tgt_mask=None,\n memory_mask=None,\n tgt_key_padding_mask=None,\n memory_key_padding_mask=None,\n pos=None,\n query_pos=None,\n ):\n style = query_pos[0]\n # (B*N, C)\n output = tgt + pos + query_pos\n\n intermediate = []\n\n for layer in self.layers:\n output = layer(\n output,\n memory,\n style,\n tgt_mask=tgt_mask,\n memory_mask=memory_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n pos=pos,\n query_pos=query_pos,\n )\n if self.return_intermediate:\n intermediate.append(self.norm(output))\n\n if self.norm is not None:\n output = self.norm(output)\n if self.return_intermediate:\n intermediate.pop()\n intermediate.append(output)\n\n if self.return_intermediate:\n return torch.stack(intermediate)\n\n return output.unsqueeze(0)" }, { "identifier": "_reset_parameters", "path": "core/utils.py", "snippet": "def _reset_parameters(model):\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)" } ]
import_statement:
import torch
import sys
from torch import nn
from .transformer import (
    PositionalEncoding,
    TransformerDecoderLayer,
    TransformerDecoder,
)
from core.networks.dynamic_fc_decoder import DynamicFCDecoderLayer, DynamicFCDecoder
from core.utils import _reset_parameters
from configs.default import get_cfg_defaults
token_num: 3,260
cropped_code:
def get_decoder_network( network_type, d_model, nhead, dim_feedforward, dropout, activation, normalize_before, num_decoder_layers, return_intermediate_dec, dynamic_K, dynamic_ratio, ): decoder = None if network_type == "TransformerDecoder": decoder_layer = TransformerDecoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) norm = nn.LayerNorm(d_model) decoder = TransformerDecoder( decoder_layer, num_decoder_layers, norm, return_intermediate_dec, ) elif network_type == "DynamicFCDecoder": d_style = d_model
next_line: decoder_layer = DynamicFCDecoderLayer(
gold_snippet_index: 3
created_at: 2023-12-28 05:39:31+00:00
level: 4k
repo_name: jiawei-ren/dreamgaussian4d
file_path: diffusers/src/diffusers/utils/testing_utils.py
context:
[ { "identifier": "BACKENDS_MAPPING", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "BACKENDS_MAPPING = OrderedDict(\n [\n (\"bs4\", (is_bs4_available, BS4_IMPORT_ERROR)),\n (\"flax\", (is_flax_available, FLAX_IMPORT_ERROR)),\n (\"inflect\", (is_inflect_available, INFLECT_IMPORT_ERROR)),\n (\"onnx\", (is_onnx_available, ONNX_IMPORT_ERROR)),\n (\"opencv\", (is_opencv_available, OPENCV_IMPORT_ERROR)),\n (\"scipy\", (is_scipy_available, SCIPY_IMPORT_ERROR)),\n (\"torch\", (is_torch_available, PYTORCH_IMPORT_ERROR)),\n (\"transformers\", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),\n (\"unidecode\", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),\n (\"librosa\", (is_librosa_available, LIBROSA_IMPORT_ERROR)),\n (\"k_diffusion\", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)),\n (\"note_seq\", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)),\n (\"wandb\", (is_wandb_available, WANDB_IMPORT_ERROR)),\n (\"omegaconf\", (is_omegaconf_available, OMEGACONF_IMPORT_ERROR)),\n (\"tensorboard\", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)),\n (\"compel\", (is_compel_available, COMPEL_IMPORT_ERROR)),\n (\"ftfy\", (is_ftfy_available, FTFY_IMPORT_ERROR)),\n (\"torchsde\", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)),\n (\"invisible_watermark\", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)),\n ]\n)" }, { "identifier": "is_compel_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_compel_available():\n return _compel_available" }, { "identifier": "is_flax_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_flax_available():\n return _flax_available" }, { "identifier": "is_note_seq_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_note_seq_available():\n return _note_seq_available" }, { "identifier": "is_onnx_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_onnx_available():\n return _onnx_available" }, { "identifier": "is_opencv_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_opencv_available():\n return _opencv_available" }, { "identifier": "is_peft_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_peft_available():\n return _peft_available" }, { "identifier": "is_torch_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_torch_available():\n return _torch_available" }, { "identifier": "is_torch_version", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_torch_version(operation: str, version: str):\n \"\"\"\n Args:\n Compares the current PyTorch version to a given reference with an operation.\n operation (`str`):\n A string representation of an operator, such as `\">\"` or `\"<=\"`\n version (`str`):\n A string version of PyTorch\n \"\"\"\n return compare_versions(parse(_torch_version), operation, version)" }, { "identifier": "is_torchsde_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_torchsde_available():\n return _torchsde_available" }, { "identifier": "is_transformers_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_transformers_available():\n return _transformers_available" }, { "identifier": "get_logger", "path": "diffusers/src/diffusers/utils/logging.py", "snippet": "def get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the 
specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom diffusers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)" } ]
import_statement:
import functools
import importlib
import inspect
import io
import logging
import multiprocessing
import os
import random
import re
import struct
import sys
import tempfile
import time
import unittest
import urllib.parse
import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
import torch
import cv2
from contextlib import contextmanager
from distutils.util import strtobool
from io import BytesIO, StringIO
from pathlib import Path
from typing import List, Optional, Union
from numpy.linalg import norm
from packaging import version
from .import_utils import (
    BACKENDS_MAPPING,
    is_compel_available,
    is_flax_available,
    is_note_seq_available,
    is_onnx_available,
    is_opencv_available,
    is_peft_available,
    is_torch_available,
    is_torch_version,
    is_torchsde_available,
    is_transformers_available,
)
from .logging import get_logger
from _pytest.config import create_terminal_writer
token_num: 2,574
cropped_code:
tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "") # format is usually: # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161]) output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array") test_file, test_class, test_fn = test_name.split("::") test_fn = test_fn.split()[0] with open(filename, "a") as f: print(";".join([test_file, test_class, test_fn, output_str]), file=f) def get_tests_dir(append_path=None): """ Args: append_path: optional path to append to the tests dir path Return: The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is joined after the `tests` dir the former is provided. """ # this function caller's __file__ caller__file__ = inspect.stack()[1][1] tests_dir = os.path.abspath(os.path.dirname(caller__file__)) while not tests_dir.endswith("tests"): tests_dir = os.path.dirname(tests_dir) if append_path: return os.path.join(tests_dir, append_path) else: return tests_dir def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no.") return _value _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) _run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False) def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) def nightly(test_case): """ Decorator marking a test that runs nightly in the diffusers CI. Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them. """ return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case) def require_torch(test_case): """ Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. """ return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) def require_torch_2(test_case): """ Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed. """ return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( test_case ) def require_torch_gpu(test_case): """Decorator marking a test that requires CUDA and PyTorch.""" return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( test_case ) def skip_mps(test_case): """Decorator marking a test to skip if torch_device is 'mps'""" return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) def require_flax(test_case): """ Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed """
all_code:
global_rng = random.Random() logger = get_logger(__name__) _required_peft_version = is_peft_available() and version.parse( version.parse(importlib.metadata.version("peft")).base_version ) > version.parse("0.5") _required_transformers_version = is_transformers_available() and version.parse( version.parse(importlib.metadata.version("transformers")).base_version ) > version.parse("4.33") USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version if is_torch_available(): if "DIFFUSERS_TEST_DEVICE" in os.environ: torch_device = os.environ["DIFFUSERS_TEST_DEVICE"] try: # try creating device to see if provided device is valid _ = torch.device(torch_device) except RuntimeError as e: raise RuntimeError( f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}" ) from e logger.info(f"torch_device overrode to {torch_device}") else: torch_device = "cuda" if torch.cuda.is_available() else "cpu" is_torch_higher_equal_than_1_12 = version.parse( version.parse(torch.__version__).base_version ) >= version.parse("1.12") if is_torch_higher_equal_than_1_12: # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details mps_backend_registered = hasattr(torch.backends, "mps") torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device def torch_all_close(a, b, *args, **kwargs): if not is_torch_available(): raise ValueError("PyTorch needs to be installed to use this function.") if not torch.allclose(a, b, *args, **kwargs): assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}." return True def numpy_cosine_similarity_distance(a, b): similarity = np.dot(a, b) / (norm(a) * norm(b)) distance = 1.0 - similarity.mean() return distance def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"): test_name = os.environ.get("PYTEST_CURRENT_TEST") if not torch.is_tensor(tensor): tensor = torch.from_numpy(tensor) tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "") # format is usually: # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161]) output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array") test_file, test_class, test_fn = test_name.split("::") test_fn = test_fn.split()[0] with open(filename, "a") as f: print(";".join([test_file, test_class, test_fn, output_str]), file=f) def get_tests_dir(append_path=None): """ Args: append_path: optional path to append to the tests dir path Return: The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is joined after the `tests` dir the former is provided. """ # this function caller's __file__ caller__file__ = inspect.stack()[1][1] tests_dir = os.path.abspath(os.path.dirname(caller__file__)) while not tests_dir.endswith("tests"): tests_dir = os.path.dirname(tests_dir) if append_path: return os.path.join(tests_dir, append_path) else: return tests_dir def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. 
raise ValueError(f"If set, {key} must be yes or no.") return _value _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) _run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False) def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) def nightly(test_case): """ Decorator marking a test that runs nightly in the diffusers CI. Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them. """ return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case) def require_torch(test_case): """ Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. """ return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) def require_torch_2(test_case): """ Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed. """ return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( test_case ) def require_torch_gpu(test_case): """Decorator marking a test that requires CUDA and PyTorch.""" return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( test_case ) def skip_mps(test_case): """Decorator marking a test to skip if torch_device is 'mps'""" return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) def require_flax(test_case): """ Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed """
next_line: return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
gold_snippet_index: 2
created_at: 2023-12-28 08:17:40+00:00
level: 4k
repo_name: oppo-us-research/SpacetimeGaussians
file_path: thirdparty/gaussian_splatting/scene/ourslite.py
context:
[ { "identifier": "getcolormodel", "path": "helper_model.py", "snippet": "def getcolormodel(rgbfuntion):\n if rgbfuntion == \"sandwich\":\n rgbdecoder = Sandwich(9,3)\n \n elif rgbfuntion == \"sandwichnoact\":\n rgbdecoder = Sandwichnoact(9,3)\n else :\n return None \n return rgbdecoder" }, { "identifier": "interpolate_point", "path": "helper_model.py", "snippet": "def interpolate_point(pcd, N=4):\n \n oldxyz = pcd.points\n oldcolor = pcd.colors\n oldnormal = pcd.normals\n oldtime = pcd.times\n \n timestamps = np.unique(oldtime)\n\n\n newxyz = []\n newcolor = []\n newnormal = []\n newtime = []\n for timeidx, time in enumerate(timestamps):\n selectedmask = oldtime == time\n selectedmask = selectedmask.squeeze(1)\n \n if timeidx == 0:\n newxyz.append(oldxyz[selectedmask])\n newcolor.append(oldcolor[selectedmask])\n newnormal.append(oldnormal[selectedmask])\n newtime.append(oldtime[selectedmask])\n else:\n xyzinput = oldxyz[selectedmask]\n xyzinput = torch.from_numpy(xyzinput).float().cuda()\n xyzinput = xyzinput.unsqueeze(0).contiguous() # 1 x N x 3\n xyznnpoints = knn(2, xyzinput, xyzinput, False)\n\n nearestneibourindx = xyznnpoints[0, 1].long() # N x 1 \n spatialdistance = torch.norm(xyzinput - xyzinput[:,nearestneibourindx,:], dim=2) # 1 x N\n spatialdistance = spatialdistance.squeeze(0)\n\n diff_sorted, _ = torch.sort(spatialdistance) \n N = spatialdistance.shape[0]\n num_take = int(N * 0.25)\n masks = spatialdistance > diff_sorted[-num_take]\n masksnumpy = masks.cpu().numpy()\n\n newxyz.append(oldxyz[selectedmask][masksnumpy])\n newcolor.append(oldcolor[selectedmask][masksnumpy])\n newnormal.append(oldnormal[selectedmask][masksnumpy])\n newtime.append(oldtime[selectedmask][masksnumpy])\n #\n newxyz = np.concatenate(newxyz, axis=0)\n newcolor = np.concatenate(newcolor, axis=0)\n newtime = np.concatenate(newtime, axis=0)\n assert newxyz.shape[0] == newcolor.shape[0] \n\n\n newpcd = BasicPointCloud(points=newxyz, colors=newcolor, normals=None, times=newtime)\n\n return newpcd" }, { "identifier": "interpolate_partuse", "path": "helper_model.py", "snippet": "def interpolate_partuse(pcd, N=4):\n # used in ablation study\n oldxyz = pcd.points\n oldcolor = pcd.colors\n oldnormal = pcd.normals\n oldtime = pcd.times\n \n timestamps = np.unique(oldtime)\n\n newxyz = []\n newcolor = []\n newnormal = []\n newtime = []\n for timeidx, time in enumerate(timestamps):\n selectedmask = oldtime == time\n selectedmask = selectedmask.squeeze(1)\n \n if timeidx % N == 0:\n newxyz.append(oldxyz[selectedmask])\n newcolor.append(oldcolor[selectedmask])\n newnormal.append(oldnormal[selectedmask])\n newtime.append(oldtime[selectedmask])\n\n else:\n pass\n #\n newxyz = np.concatenate(newxyz, axis=0)\n newcolor = np.concatenate(newcolor, axis=0)\n newtime = np.concatenate(newtime, axis=0)\n assert newxyz.shape[0] == newcolor.shape[0] \n\n newpcd = BasicPointCloud(points=newxyz, colors=newcolor, normals=None, times=newtime)\n\n return newpcd" }, { "identifier": "interpolate_pointv3", "path": "helper_model.py", "snippet": "def interpolate_pointv3(pcd, N=4,m=0.25):\n \n oldxyz = pcd.points\n oldcolor = pcd.colors\n oldnormal = pcd.normals\n oldtime = pcd.times\n \n timestamps = np.unique(oldtime)\n\n\n newxyz = []\n newcolor = []\n newnormal = []\n newtime = []\n for timeidx, time in enumerate(timestamps):\n selectedmask = oldtime == time\n selectedmask = selectedmask.squeeze(1)\n \n if timeidx % N == 0:\n newxyz.append(oldxyz[selectedmask])\n newcolor.append(oldcolor[selectedmask])\n 
newnormal.append(oldnormal[selectedmask])\n newtime.append(oldtime[selectedmask])\n\n else:\n xyzinput = oldxyz[selectedmask]\n xyzinput = torch.from_numpy(xyzinput).float().cuda()\n xyzinput = xyzinput.unsqueeze(0).contiguous() # 1 x N x 3\n xyznnpoints = knn(2, xyzinput, xyzinput, False)\n\n nearestneibourindx = xyznnpoints[0, 1].long() # N x 1 skip the first one, we select the second closest one\n spatialdistance = torch.norm(xyzinput - xyzinput[:,nearestneibourindx,:], dim=2) # 1 x N\n spatialdistance = spatialdistance.squeeze(0)\n\n diff_sorted, _ = torch.sort(spatialdistance) \n M = spatialdistance.shape[0]\n num_take = int(M * m)\n masks = spatialdistance > diff_sorted[-num_take]\n masksnumpy = masks.cpu().numpy()\n\n newxyz.append(oldxyz[selectedmask][masksnumpy])\n newcolor.append(oldcolor[selectedmask][masksnumpy])\n newnormal.append(oldnormal[selectedmask][masksnumpy])\n newtime.append(oldtime[selectedmask][masksnumpy])\n #\n newxyz = np.concatenate(newxyz, axis=0)\n newcolor = np.concatenate(newcolor, axis=0)\n newtime = np.concatenate(newtime, axis=0)\n assert newxyz.shape[0] == newcolor.shape[0] \n\n\n newpcd = BasicPointCloud(points=newxyz, colors=newcolor, normals=None, times=newtime)\n\n return newpcd" } ]
import_statement:
import torch
import numpy as np
import os
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
from torch import nn
from utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from simple_knn._C import distCUDA2
from utils.graphics_utils import BasicPointCloud
from utils.general_utils import strip_symmetric, build_scaling_rotation, update_quaternion
from helper_model import getcolormodel, interpolate_point, interpolate_partuse, interpolate_pointv3
token_num: 2,770
cropped_code:
self._rotation = torch.empty(0) self._opacity = torch.empty(0) self.max_radii2D = torch.empty(0) self.xyz_gradient_accum = torch.empty(0) self.denom = torch.empty(0) self._motion = torch.empty(0) self.optimizer = None self.percent_dense = 0 self.spatial_lr_scale = 0 self._omega = torch.empty(0) self.rgbdecoder = getcolormodel(rgbfuntion) self.setup_functions() self.delta_t = None self.omegamask = None self.maskforems = None self.distancetocamera = None self.trbfslinit = None self.ts = None self.trbfoutput = None self.preprocesspoints = False self.addsphpointsscale = 0.8 self.maxz, self.minz = 0.0 , 0.0 self.maxy, self.miny = 0.0 , 0.0 self.maxx, self.minx = 0.0 , 0.0 self.computedtrbfscale = None self.computedopacity = None self.raystart = 0.7 def capture(self): return ( self.active_sh_degree, self._xyz, self._features_dc, self._scaling, self._rotation, self._opacity, self.max_radii2D, self.xyz_gradient_accum, self.denom, self.optimizer.state_dict(), self.spatial_lr_scale, ) def restore(self, model_args, training_args): (self.active_sh_degree, self._xyz, self._features_dc, self._scaling, self._rotation, self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) def get_rotation(self, delta_t): rotation = self._rotation + delta_t*self._omega self.delta_t = delta_t return self.rotation_activation(rotation) @property def get_xyz(self): return self._xyz @property def get_trbfcenter(self): return self._trbf_center @property def get_trbfscale(self): return self._trbf_scale def get_features(self, deltat): return self._features_dc @property def get_opacity(self): return self.opacity_activation(self._opacity) def get_covariance(self, scaling_modifier = 1): return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) def oneupSHdegree(self): if self.active_sh_degree < self.max_sh_degree: self.active_sh_degree += 1 def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): if self.preprocesspoints == 3: pcd = interpolate_point(pcd, 4) elif self.preprocesspoints == 4: pcd = interpolate_point(pcd, 2) elif self.preprocesspoints == 5: pcd = interpolate_point(pcd, 6) elif self.preprocesspoints == 6: pcd = interpolate_point(pcd, 8) elif self.preprocesspoints == 7: pcd = interpolate_point(pcd, 16) elif self.preprocesspoints == 8: pcd = interpolate_pointv3(pcd, 4) elif self.preprocesspoints == 14:
all_code:
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class GaussianModel: def setup_functions(self): def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): L = build_scaling_rotation(scaling_modifier * scaling, rotation) actual_covariance = L @ L.transpose(1, 2) symm = strip_symmetric(actual_covariance) return symm self.scaling_activation = torch.exp self.scaling_inverse_activation = torch.log self.covariance_activation = build_covariance_from_scaling_rotation self.opacity_activation = torch.sigmoid self.inverse_opacity_activation = inverse_sigmoid self.rotation_activation = torch.nn.functional.normalize #self.featureact = torch.sigmoid def __init__(self, sh_degree : int, rgbfuntion="rgbv1"): self.active_sh_degree = 0 self.max_sh_degree = sh_degree self._xyz = torch.empty(0) self._features_dc = torch.empty(0) # self._features_rest = torch.empty(0) self._scaling = torch.empty(0) self._rotation = torch.empty(0) self._opacity = torch.empty(0) self.max_radii2D = torch.empty(0) self.xyz_gradient_accum = torch.empty(0) self.denom = torch.empty(0) self._motion = torch.empty(0) self.optimizer = None self.percent_dense = 0 self.spatial_lr_scale = 0 self._omega = torch.empty(0) self.rgbdecoder = getcolormodel(rgbfuntion) self.setup_functions() self.delta_t = None self.omegamask = None self.maskforems = None self.distancetocamera = None self.trbfslinit = None self.ts = None self.trbfoutput = None self.preprocesspoints = False self.addsphpointsscale = 0.8 self.maxz, self.minz = 0.0 , 0.0 self.maxy, self.miny = 0.0 , 0.0 self.maxx, self.minx = 0.0 , 0.0 self.computedtrbfscale = None self.computedopacity = None self.raystart = 0.7 def capture(self): return ( self.active_sh_degree, self._xyz, self._features_dc, self._scaling, self._rotation, self._opacity, self.max_radii2D, self.xyz_gradient_accum, self.denom, self.optimizer.state_dict(), self.spatial_lr_scale, ) def restore(self, model_args, training_args): (self.active_sh_degree, self._xyz, self._features_dc, self._scaling, self._rotation, self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) def get_rotation(self, delta_t): rotation = self._rotation + delta_t*self._omega self.delta_t = delta_t return self.rotation_activation(rotation) @property def get_xyz(self): return self._xyz @property def get_trbfcenter(self): return self._trbf_center @property def get_trbfscale(self): return self._trbf_scale def get_features(self, deltat): return self._features_dc @property def get_opacity(self): return self.opacity_activation(self._opacity) def get_covariance(self, scaling_modifier = 1): return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) def oneupSHdegree(self): if self.active_sh_degree < self.max_sh_degree: self.active_sh_degree += 1 def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): if self.preprocesspoints == 3: pcd = interpolate_point(pcd, 4) elif self.preprocesspoints == 4: pcd = interpolate_point(pcd, 2) elif self.preprocesspoints == 5: pcd = interpolate_point(pcd, 6) elif 
self.preprocesspoints == 6: pcd = interpolate_point(pcd, 8) elif self.preprocesspoints == 7: pcd = interpolate_point(pcd, 16) elif self.preprocesspoints == 8: pcd = interpolate_pointv3(pcd, 4) elif self.preprocesspoints == 14:
next_line: pcd = interpolate_partuse(pcd, 2)
gold_snippet_index: 2
created_at: 2023-12-28 04:16:32+00:00
level: 4k
repo_name: Meituan-AutoML/MobileVLM
file_path: scripts/inference.py
context:
[ { "identifier": "load_pretrained_model", "path": "mobilevlm/model/mobilevlm.py", "snippet": "def load_pretrained_model(model_path, load_8bit=False, load_4bit=False, device_map=\"auto\", device=\"cuda\"):\n\n from mobilevlm.model.mobilellama import MobileLlamaForCausalLM\n\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n \n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = MobileLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n\n mm_use_im_start_end = getattr(model.config, \"mm_use_im_start_end\", False)\n mm_use_im_patch_token = getattr(model.config, \"mm_use_im_patch_token\", True)\n if mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n if mm_use_im_start_end:\n tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n model.resize_token_embeddings(len(tokenizer))\n\n vision_tower = model.get_vision_tower()\n if not vision_tower.is_loaded:\n vision_tower.load_model()\n vision_tower.to(device=device, dtype=torch.float16)\n image_processor = vision_tower.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n \n return tokenizer, model, image_processor, context_len" }, { "identifier": "conv_templates", "path": "mobilevlm/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "disable_torch_init", "path": "mobilevlm/utils.py", "snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)" }, { "identifier": "process_images", "path": "mobilevlm/utils.py", "snippet": "def process_images(images, image_processor, model_cfg):\n image_aspect_ratio = getattr(model_cfg, \"image_aspect_ratio\", None)\n new_images = []\n if image_aspect_ratio == 'pad':\n for image in images:\n image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))\n image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n else:\n return image_processor(images, return_tensors='pt')['pixel_values']\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images" }, { "identifier": "tokenizer_image_token", "path": "mobilevlm/utils.py", "snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = 
[tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids" }, { "identifier": "KeywordsStoppingCriteria", "path": "mobilevlm/utils.py", "snippet": "class KeywordsStoppingCriteria(StoppingCriteria):\n def __init__(self, keywords, tokenizer, input_ids):\n self.keywords = keywords\n self.keyword_ids = []\n self.max_keyword_len = 0\n for keyword in keywords:\n cur_keyword_ids = tokenizer(keyword).input_ids\n if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:\n cur_keyword_ids = cur_keyword_ids[1:]\n if len(cur_keyword_ids) > self.max_keyword_len:\n self.max_keyword_len = len(cur_keyword_ids)\n self.keyword_ids.append(torch.tensor(cur_keyword_ids))\n self.tokenizer = tokenizer\n self.start_len = input_ids.shape[1]\n\n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n assert output_ids.shape[0] == 1, \"Only support batch size 1 (yet)\" # TODO\n offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)\n self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]\n for keyword_id in self.keyword_ids:\n if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():\n return True\n outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]\n for keyword in self.keywords:\n if keyword in outputs:\n return True\n return False" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "mobilevlm/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "mobilevlm/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" } ]
import sys
import torch
import argparse
from PIL import Image
from pathlib import Path
from mobilevlm.model.mobilevlm import load_pretrained_model
from mobilevlm.conversation import conv_templates, SeparatorStyle
from mobilevlm.utils import disable_torch_init, process_images, tokenizer_image_token, KeywordsStoppingCriteria
from mobilevlm.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
1,683
sys.path.append(str(Path(__file__).parent.parent.resolve()))

def inference_once(args):
    disable_torch_init()
    model_name = args.model_path.split('/')[-1]
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path)
    images = [Image.open(args.image_file).convert("RGB")]
images_tensor = process_images(images, image_processor, model.config).to(model.device, dtype=torch.float16)
3
2023-12-29 03:35:49+00:00
4k
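The tokenizer_image_token helper in the record above splits a prompt on <image> and interleaves the IMAGE_TOKEN_INDEX sentinel between the tokenized text chunks, keeping a single BOS up front. Below is a runnable sketch of that interleaving with a stub character-level tokenizer; the stub and its BOS id are assumptions for illustration only.

from types import SimpleNamespace

IMAGE_TOKEN_INDEX = -200  # sentinel value from mobilevlm/constants.py

class StubTokenizer:
    """Illustrative stand-in: BOS id 1, then one id per character."""
    bos_token_id = 1
    def __call__(self, text):
        return SimpleNamespace(input_ids=[self.bos_token_id] + [ord(c) for c in text])

def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX):
    chunks = [tokenizer(c).input_ids for c in prompt.split('<image>')]
    def insert_separator(X, sep):
        # interleave sep between chunks: [c0, sep, c1, sep, ...][:-1]
        return [e for pair in zip(X, [sep] * len(X)) for e in pair][:-1]
    input_ids, offset = [], 0
    if chunks and chunks[0] and chunks[0][0] == tokenizer.bos_token_id:
        offset = 1
        input_ids.append(chunks[0][0])  # keep a single BOS token up front
    for x in insert_separator(chunks, [image_token_index] * (offset + 1)):
        input_ids.extend(x[offset:])    # strip the duplicated BOS from later chunks
    return input_ids

ids = tokenizer_image_token("hi <image> there", StubTokenizer())
assert ids.count(IMAGE_TOKEN_INDEX) == 1  # exactly one placeholder per <image> tag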
kinggongzilla/ai-clone-whatsapp
utils/config_utils.py
[ { "identifier": "datasets", "path": "configs/datasets.py", "snippet": "class custom_dataset:" }, { "identifier": "lora_config", "path": "configs/peft.py", "snippet": "class lora_config:\n r: int=8\n lora_alpha: int=32\n target_modules: List[str] = field(default_factory=lambda: [\"q_proj\", \"v_proj\"])\n bias= \"none\"\n task_type: str= \"CAUSAL_LM\"\n lora_dropout: float=0.05\n inference_mode: bool = False" }, { "identifier": "llama_adapter_config", "path": "configs/peft.py", "snippet": "class llama_adapter_config:\n adapter_len: int= 10\n adapter_layers: int= 30\n task_type: str= \"CAUSAL_LM\"" }, { "identifier": "prefix_config", "path": "configs/peft.py", "snippet": "class prefix_config:\n num_virtual_tokens: int=30\n task_type: str= \"CAUSAL_LM\" " }, { "identifier": "train_config", "path": "configs/training.py", "snippet": "class train_config:\n whatsapp_username: str=\"\" # your own whatsapp user name as it is in the chat .txt files\n model_name: str=\"mistralai/Mistral-7B-Instruct-v0.2\"\n enable_fsdp: bool=False\n low_cpu_fsdp: bool=False\n run_validation: bool=False\n batch_size_training: int=1\n batching_strategy: str=\"packing\" #alternative: padding\n context_length: int=4096\n gradient_accumulation_steps: int=1\n gradient_clipping: bool = False\n gradient_clipping_threshold: float = 1.0\n num_epochs: int=1\n num_workers_dataloader: int=1\n lr: float=1e-4\n weight_decay: float=0.0\n gamma: float= 0.85\n seed: int=42\n use_fp16: bool=True\n mixed_precision: bool=True\n val_batch_size: int=1\n dataset = \"custom_dataset\"\n data_dir: str = \"data/preprocessing/processed_chats\"\n peft_method: str = \"lora\" # None , llama_adapter, prefix\n use_peft: bool=True\n output_dir: str = \"checkpoints\"\n freeze_layers: bool = False\n num_freeze_layers: int = 1\n quantization: bool = True\n one_gpu: bool = False\n save_model: bool = True\n dist_checkpoint_root_folder: str=\"PATH/to/save/FSDP/model\" # will be used if using FSDP\n dist_checkpoint_folder: str=\"fine-tuned\" # will be used if using FSDP\n save_optimizer: bool=False # will be used if using FSDP\n use_fast_kernels: bool = False # Enable using SDPA from PyTroch Accelerated Transformers, make use Flash Attention and Xformer memory-efficient kernels" }, { "identifier": "LengthBasedBatchSampler", "path": "data/sampler.py", "snippet": "class LengthBasedBatchSampler(torch.utils.data.BatchSampler):\n def __init__(self, data_source, batch_size: int, drop_last: bool, shuffle: bool=True) -> None:\n if isinstance(next(iter(data_source)), dict):\n first_key = next(iter(next(iter(data_source)).keys()))\n self.lengths = [len(d[first_key]) for d in data_source]\n else:\n self.lengths = [len(d) for d in data_source]\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.shuffle = shuffle\n\n def __iter__(self):\n ids = np.argsort(self.lengths)\n if self.drop_last:\n ids = ids[:len(ids) // self.batch_size * self.batch_size]\n\n batches = [ids[i:i+self.batch_size] for i in range(0, len(ids), self.batch_size)]\n\n if self.shuffle:\n random.shuffle(batches)\n\n for b in batches:\n yield b\n\n def __len__(self):\n if self.drop_last:\n return len(self.lengths) // self.batch_size\n else:\n return len(self.lengths) // self.batch_size + (len(self.lengths) % self.batch_size > 0)" }, { "identifier": "DistributedLengthBasedBatchSampler", "path": "data/sampler.py", "snippet": "class DistributedLengthBasedBatchSampler(torch.utils.data.BatchSampler):\n def __init__(self, data_source, batch_size: int, num_replicas: int, rank: int, shuffle: bool 
= True, seed: int = 0) -> None:\n random.seed(seed)\n self.batch_sampler = LengthBasedBatchSampler(\n data_source, batch_size=batch_size, drop_last=True, shuffle=shuffle\n )\n self.num_replicas = num_replicas\n self.rank = rank\n \n def __iter__(self):\n max_length = len(self.batch_sampler) // self.num_replicas * self.num_replicas\n return islice(self.batch_sampler, self.rank, max_length, self.num_replicas)\n \n def __len__(self):\n return len(self.batch_sampler) // self.num_replicas" }, { "identifier": "DATASET_PREPROC", "path": "utils/dataset_utils.py", "snippet": "DATASET_PREPROC = {\n \"custom_dataset\": get_custom_dataset,\n}" } ]
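A toy run of the length-sorted batching idea behind LengthBasedBatchSampler above: sorting sample indices by length makes each batch hold similarly sized sequences, which reduces padding waste. The lengths here are made up for illustration.

import numpy as np

lengths = [5, 2, 9, 3, 8, 1]
batch_size = 2
ids = np.argsort(lengths)                          # indices sorted by length
ids = ids[:len(ids) // batch_size * batch_size]    # drop_last=True
batches = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]
assert [sorted(lengths[i] for i in b) for b in batches] == [[1, 2], [3, 5], [8, 9]]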
import inspect

import torch.distributed as dist
from dataclasses import asdict
from torch.utils.data import DistributedSampler
from peft import (
    LoraConfig,
    AdaptionPromptConfig,
    PrefixTuningConfig,
)
from transformers import default_data_collator
from transformers.data import DataCollatorForSeq2Seq

from configs import datasets, lora_config, llama_adapter_config, prefix_config, train_config
from data.sampler import LengthBasedBatchSampler, DistributedLengthBasedBatchSampler
from utils.dataset_utils import DATASET_PREPROC
1,825
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

def update_config(config, **kwargs):
    if isinstance(config, (tuple, list)):
        for c in config:
            update_config(c, **kwargs)
    else:
        for k, v in kwargs.items():
            if hasattr(config, k):
                setattr(config, k, v)
            elif "." in k:
                # allow --some_config.some_param=True
                config_name, param_name = k.split(".")
                if type(config).__name__ == config_name:
                    if hasattr(config, param_name):
                        setattr(config, param_name, v)
                    else:
                        # In case of specialized config we can warn the user
                        print(f"Warning: {config_name} does not accept parameter: {k}")
            elif isinstance(config, train_config):
                print(f"Warning: unknown parameter {k}")

def generate_peft_config(train_config, kwargs):
    configs = (lora_config, llama_adapter_config, prefix_config)
    peft_configs = (LoraConfig, AdaptionPromptConfig, PrefixTuningConfig)
    names = tuple(c.__name__.rstrip("_config") for c in configs)
    assert train_config.peft_method in names, f"Peft config not found: {train_config.peft_method}"
    config = configs[names.index(train_config.peft_method)]()
    update_config(config, **kwargs)
    params = asdict(config)
    peft_config = peft_configs[names.index(train_config.peft_method)](**params)
    return peft_config

def generate_dataset_config(train_config, kwargs):
    names = tuple(DATASET_PREPROC.keys())
    assert train_config.dataset in names, f"Unknown dataset: {train_config.dataset}"
    dataset_config = {k: v for k, v in inspect.getmembers(datasets)}[train_config.dataset]()
    update_config(dataset_config, **kwargs)
    return dataset_config

def get_dataloader_kwargs(train_config, dataset, tokenizer, mode):
    kwargs = {}
    batch_size = train_config.batch_size_training if mode == "train" else train_config.val_batch_size
    if train_config.batching_strategy == "padding":
        if train_config.enable_fsdp:
            kwargs["batch_sampler"] = DistributedLengthBasedBatchSampler(
                dataset,
                batch_size=batch_size,
                rank=dist.get_rank(),
                num_replicas=dist.get_world_size(),
                shuffle=mode == "train",
            )
        else:
kwargs["batch_sampler"] = LengthBasedBatchSampler(dataset, batch_size, drop_last=True, shuffle=mode=="train")
5
2023-12-28 00:02:08+00:00
4k
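The update_config pattern above routes plain keyword overrides to matching attributes, and dotted keys such as "lora_config.r" to the config class named before the dot. A minimal sketch with stand-in dataclasses (not the real project configs) showing both paths:

from dataclasses import dataclass

@dataclass
class train_config:
    lr: float = 1e-4

@dataclass
class lora_config:
    r: int = 8

def update_config(config, **kwargs):
    # simplified version of the function above, covering the two override paths
    if isinstance(config, (tuple, list)):
        for c in config:
            update_config(c, **kwargs)
        return
    for k, v in kwargs.items():
        if hasattr(config, k):
            setattr(config, k, v)
        elif "." in k:
            config_name, param_name = k.split(".")
            if type(config).__name__ == config_name and hasattr(config, param_name):
                setattr(config, param_name, v)

tc, lc = train_config(), lora_config()
update_config((tc, lc), lr=3e-4, **{"lora_config.r": 16})
assert tc.lr == 3e-4 and lc.r == 16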
FoundationVision/UniRef
projects/UniRef/uniref/models/deformable_detr/deformable_detr.py
[ { "identifier": "box_ops", "path": "projects/UniRef/uniref/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef multi_box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef generalized_multi_box_iou(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n\n def to(self, device, non_blocking=False):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device, non_blocking=non_blocking)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device, non_blocking=non_blocking)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def record_stream(self, *args, **kwargs):\n self.tensors.record_stream(*args, **kwargs)\n if self.mask is not None:\n self.mask.record_stream(*args, **kwargs)\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)" }, { "identifier": "nested_tensor_from_tensor_list", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor], size_divisibility=1, split=True):\n if split:\n tensor_list = [tensor.split(3,dim=0) for tensor in tensor_list]\n tensor_list = [item for sublist in tensor_list for item in sublist]\n\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n\n if size_divisibility > 1:\n stride = size_divisibility\n # the last two dims are H,W, both subject to divisibility requirement\n max_size[-2] = (max_size[-2] + (stride - 1)) // stride * stride\n max_size[-1] = (max_size[-1] + (stride - 1)) // stride * stride\n\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", 
align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if float(torchvision.__version__[:3]) < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n if float(torchvision.__version__[:3]) < 0.5:\n return _NewEmptyTensorOp.apply(input, output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-5):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" }, { "identifier": "build_backbone", "path": "projects/UniRef/uniref/models/deformable_detr/backbone.py", "snippet": "def build_backbone(args):\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n return_interm_layers = args.masks or (args.num_feature_levels > 1)\n backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)\n model = Joiner(backbone, position_embedding)\n return model" }, { "identifier": "build_matcher", "path": "projects/UniRef/uniref/models/deformable_detr/matcher.py", "snippet": "def build_matcher(args):\n return HungarianMatcher(cost_class=args.set_cost_class,\n cost_bbox=args.set_cost_bbox,\n cost_giou=args.set_cost_giou)" } ]
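A quick numeric check of inverse_sigmoid from the context above: it is the logit function with clamping for numerical stability, so sigmoid should invert it away from the clamped ends.

import torch

def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)

p = torch.tensor([0.1, 0.5, 0.9])
assert torch.allclose(torch.sigmoid(inverse_sigmoid(p)), p, atol=1e-6)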
import torch
import torch.nn.functional as F
import math
import copy
from torch import nn
from ...util import box_ops
from ...util.misc import (NestedTensor, nested_tensor_from_tensor_list,
                          accuracy, get_world_size, interpolate,
                          is_dist_avail_and_initialized, inverse_sigmoid)
from .backbone import build_backbone
from .matcher import build_matcher
3,201
    def __init__(self, cfg, backbone, transformer, num_classes, num_queries, num_feature_levels,
                 aux_loss=True, with_box_refine=False, two_stage=False, mixed_selection=False,
                 use_iou_branch=False):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
            with_box_refine: iterative bounding box refinement
            two_stage: two-stage Deformable DETR
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        self.class_embed = nn.Linear(hidden_dim, num_classes)
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        if use_iou_branch:
            self.iou_head = nn.Linear(hidden_dim, 1)
        self.num_feature_levels = num_feature_levels
        if not two_stage:
            self.query_embed = nn.Embedding(num_queries, hidden_dim*2)
        elif mixed_selection:
            self.query_embed = nn.Embedding(num_queries, hidden_dim)
        if num_feature_levels > 1:
            num_backbone_outs = len(backbone.strides)
            input_proj_list = []
            for _ in range(num_backbone_outs):
                in_channels = backbone.num_channels[_]
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
            for _ in range(num_feature_levels - num_backbone_outs):
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
                in_channels = hidden_dim
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            self.input_proj = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                )])
        self.backbone = backbone
        self.aux_loss = aux_loss
        self.with_box_refine = with_box_refine
        self.two_stage = two_stage

        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        if use_iou_branch:
            self.iou_head.bias.data = torch.ones(1) * bias_value
        self.class_embed.bias.data = torch.ones(num_classes) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        for proj in self.input_proj:
            nn.init.xavier_uniform_(proj[0].weight, gain=1)
            nn.init.constant_(proj[0].bias, 0)

        # if two-stage, the last class_embed and bbox_embed is for region proposal generation
        num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers
        if with_box_refine:
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            if use_iou_branch:
                self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            # hack implementation for iterative bounding box refinement
            self.transformer.decoder.bbox_embed = self.bbox_embed
        else:
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            if use_iou_branch:
                self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
            self.transformer.decoder.bbox_embed = None
        if two_stage:
            # hack implementation for two-stage
            self.transformer.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
        self.mixed_selection = mixed_selection

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]

class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""

    @torch.no_grad()
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augmentation, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']

        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2

        prob = out_logits.sigmoid()
        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
        scores = topk_values
        topk_boxes = topk_indexes // out_logits.shape[2]
        labels = topk_indexes % out_logits.shape[2]
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Deformable DETR model and criterion classes.
"""

def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])

class DeformableDETR(nn.Module):
    """ This is the Deformable DETR module that performs object detection """

    def __init__(self, cfg, backbone, transformer, num_classes, num_queries, num_feature_levels,
                 aux_loss=True, with_box_refine=False, two_stage=False, mixed_selection=False,
                 use_iou_branch=False):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
            with_box_refine: iterative bounding box refinement
            two_stage: two-stage Deformable DETR
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        self.class_embed = nn.Linear(hidden_dim, num_classes)
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        if use_iou_branch:
            self.iou_head = nn.Linear(hidden_dim, 1)
        self.num_feature_levels = num_feature_levels
        if not two_stage:
            self.query_embed = nn.Embedding(num_queries, hidden_dim*2)
        elif mixed_selection:
            self.query_embed = nn.Embedding(num_queries, hidden_dim)
        if num_feature_levels > 1:
            num_backbone_outs = len(backbone.strides)
            input_proj_list = []
            for _ in range(num_backbone_outs):
                in_channels = backbone.num_channels[_]
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
            for _ in range(num_feature_levels - num_backbone_outs):
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
                in_channels = hidden_dim
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            self.input_proj = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                )])
        self.backbone = backbone
        self.aux_loss = aux_loss
        self.with_box_refine = with_box_refine
        self.two_stage = two_stage

        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        if use_iou_branch:
            self.iou_head.bias.data = torch.ones(1) * bias_value
        self.class_embed.bias.data = torch.ones(num_classes) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        for proj in self.input_proj:
            nn.init.xavier_uniform_(proj[0].weight, gain=1)
            nn.init.constant_(proj[0].bias, 0)

        # if two-stage, the last class_embed and bbox_embed is for region proposal generation
        num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers
        if with_box_refine:
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            if use_iou_branch:
                self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            # hack implementation for iterative bounding box refinement
            self.transformer.decoder.bbox_embed = self.bbox_embed
        else:
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            if use_iou_branch:
                self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
            self.transformer.decoder.bbox_embed = None
        if two_stage:
            # hack implementation for two-stage
            self.transformer.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
        self.mixed_selection = mixed_selection

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]

class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""

    @torch.no_grad()
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augmentation, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']

        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2

        prob = out_logits.sigmoid()
        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
        scores = topk_values
        topk_boxes = topk_indexes // out_logits.shape[2]
        labels = topk_indexes % out_logits.shape[2]
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
0
2023-12-22 13:31:33+00:00
4k
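PostProcess above takes top-k over the flattened (queries x classes) scores in one call, then recovers the query index with integer division and the class index with modulo. A toy-shape cross-check of that trick:

import torch

batch, num_queries, num_classes, k = 2, 10, 5, 3
prob = torch.rand(batch, num_queries, num_classes)

topk_values, topk_indexes = torch.topk(prob.view(batch, -1), k, dim=1)
topk_boxes = topk_indexes // num_classes   # which query each hit came from
labels = topk_indexes % num_classes        # which class within that query

# Cross-check against the unflattened probabilities.
for b in range(batch):
    for j in range(k):
        assert prob[b, topk_boxes[b, j], labels[b, j]] == topk_values[b, j]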
xhuangcv/humannorm
threestudio/models/geometry/implicit_volume.py
[ { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "BaseImplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseImplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n isosurface: bool = True\n isosurface_method: str = \"mt\"\n isosurface_resolution: int = 128\n isosurface_threshold: Union[float, str] = 0.0\n isosurface_chunk: int = 0\n isosurface_coarse_to_fine: bool = True\n isosurface_deformable_grid: bool = False\n isosurface_remove_outliers: bool = True\n isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01\n use_sdf_loss: bool = False\n start_sdf_loss_step: int = 3000\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )\n self.isosurface_helper: Optional[IsosurfaceHelper] = None\n self.unbounded: bool = False\n\n def _initilize_isosurface_helper(self):\n if self.cfg.isosurface and self.isosurface_helper is None:\n if self.cfg.isosurface_method == \"mc-cpu\":\n self.isosurface_helper = MarchingCubeCPUHelper(\n self.cfg.isosurface_resolution\n ).to(self.device)\n elif self.cfg.isosurface_method == \"mt\":\n self.isosurface_helper = MarchingTetrahedraHelper(\n self.cfg.isosurface_resolution,\n f\"load/tets/{self.cfg.isosurface_resolution}_tets.npz\",\n ).to(self.device)\n else:\n raise AttributeError(\n \"Unknown isosurface method {self.cfg.isosurface_method}\"\n )\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n raise NotImplementedError\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]], Optional[Float[Tensor, \"*N 1\"]]]:\n # return the value of the implicit field, could be density / signed distance\n # also return a deformation field if the grid vertices can be optimized\n raise NotImplementedError\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n # return the value of the implicit field, where the zero level set represents the surface\n raise NotImplementedError\n\n def _isosurface(self, bbox: Float[Tensor, \"2 3\"], fine_stage: bool = False) -> Mesh:\n def batch_func(x):\n # scale to bbox as the input vertices are in [0, 1]\n field, deformation, sdf_loss = self.forward_field(\n scale_tensor(\n x.to(bbox.device), self.isosurface_helper.points_range, bbox\n ),\n )\n field = field.to(\n x.device\n ) # move to the same device as the input (could be CPU)\n if deformation is not None:\n deformation = deformation.to(x.device)\n return field, deformation, sdf_loss\n\n assert self.isosurface_helper is not None\n\n field, deformation, sdf_loss = chunk_batch(\n batch_func,\n self.cfg.isosurface_chunk,\n self.isosurface_helper.grid_vertices,\n )\n\n threshold: float\n\n 
if isinstance(self.cfg.isosurface_threshold, float):\n threshold = self.cfg.isosurface_threshold\n elif self.cfg.isosurface_threshold == \"auto\":\n eps = 1.0e-5\n threshold = field[field > eps].mean().item()\n threestudio.info(\n f\"Automatically determined isosurface threshold: {threshold}\"\n )\n else:\n raise TypeError(\n f\"Unknown isosurface_threshold {self.cfg.isosurface_threshold}\"\n )\n\n level = self.forward_level(field, threshold)\n mesh: Mesh = self.isosurface_helper(level, deformation=deformation)\n mesh.v_pos = scale_tensor(\n mesh.v_pos, self.isosurface_helper.points_range, bbox\n ) # scale to bbox as the grid vertices are in [0, 1]\n mesh.add_extra(\"bbox\", bbox)\n\n if self.cfg.isosurface_remove_outliers:\n # remove outliers components with small number of faces\n # only enabled when the mesh is not differentiable\n mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)\n\n return mesh, sdf_loss\n\n def isosurface(self) -> Mesh:\n if not self.cfg.isosurface:\n raise NotImplementedError(\n \"Isosurface is not enabled in the current configuration\"\n )\n self._initilize_isosurface_helper()\n if self.cfg.isosurface_coarse_to_fine:\n threestudio.debug(\"First run isosurface to get a tight bounding box ...\")\n with torch.no_grad():\n mesh_coarse = self._isosurface(self.bbox)\n vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)\n vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])\n vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])\n threestudio.debug(\"Run isosurface again with the tight bounding box ...\")\n mesh, sdf_loss = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)\n else:\n mesh, sdf_loss = self._isosurface(self.bbox)\n if self.cfg.use_sdf_loss:\n return mesh, sdf_loss\n else:\n return mesh" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 
3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "get_activation", "path": "threestudio/utils/ops.py", "snippet": "def get_activation(name) -> Callable:\n if name is None:\n return lambda x: x\n name = name.lower()\n if name == \"none\":\n return lambda x: x\n elif name == \"lin2srgb\":\n return lambda x: torch.where(\n x > 0.0031308,\n torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,\n 12.92 * x,\n ).clamp(0.0, 1.0)\n elif name == \"exp\":\n return lambda x: torch.exp(x)\n elif name == \"shifted_exp\":\n return lambda x: torch.exp(x - 1.0)\n elif name == \"trunc_exp\":\n return trunc_exp\n elif name == \"shifted_trunc_exp\":\n return lambda x: trunc_exp(x - 1.0)\n elif name == \"sigmoid\":\n return lambda x: torch.sigmoid(x)\n elif name == \"tanh\":\n return lambda x: torch.tanh(x)\n elif name == \"shifted_softplus\":\n return lambda x: F.softplus(x - 1.0)\n elif name == \"scale_-11_01\":\n return lambda x: x * 0.5 + 0.5\n else:\n try:\n return getattr(F, name)\n except AttributeError:\n raise ValueError(f\"Unknown activation function: {name}\")" } ]
from dataclasses import dataclass, field

from threestudio.models.geometry.base import (
    BaseGeometry,
    BaseImplicitGeometry,
    contract_to_unisphere,
)
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import threestudio
3,023
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
    @dataclass
    class Config(BaseImplicitGeometry.Config):
        n_input_dims: int = 3
        n_feature_dims: int = 3
        density_activation: Optional[str] = "softplus"
        density_bias: Union[float, str] = "blob_magic3d"
        density_blob_scale: float = 10.0
        density_blob_std: float = 0.5
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        normal_type: Optional[
            str
        ] = "finite_difference"  # in ['pred', 'finite_difference', 'finite_difference_laplacian']
        finite_difference_normal_eps: float = 0.01

        # automatically determine the threshold
        isosurface_threshold: Union[float, str] = 25.0

    cfg: Config

    def configure(self) -> None:
        super().configure()
self.encoding = get_encoding(
3
2023-12-23 12:37:48+00:00
4k
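contract_to_unisphere in the context above maps unbounded points into the unit cube by squashing everything outside the bbox with a 2 - 1/|x| contraction. A runnable sketch follows, with scale_tensor inlined as a plain affine map (an assumption; the real helper lives in threestudio.utils.ops and is more general):

import torch

def scale_tensor(x, src, dst):
    # map x from range src=(lo, hi) into range dst=(lo, hi)
    return (x - src[0]) / (src[1] - src[0]) * (dst[1] - dst[0]) + dst[0]

def contract_to_unisphere(x, bbox, unbounded=False):
    if unbounded:
        x = scale_tensor(x, bbox, (0, 1))
        x = x * 2 - 1                      # aabb mapped to [-1, 1]
        mag = x.norm(dim=-1, keepdim=True)
        mask = mag.squeeze(-1) > 1
        x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
        x = x / 4 + 0.5                    # whole space squashed into [0, 1]
    else:
        x = scale_tensor(x, bbox, (0, 1))
    return x

bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
pts = torch.tensor([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0]])  # second point lies far outside
out = contract_to_unisphere(pts, bbox, unbounded=True)
assert (out >= 0).all() and (out <= 1).all()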
thooton/muse
main.py
[ { "identifier": "API_KEYS", "path": "config.py", "snippet": "API_KEYS = json.loads(config.get(\"Gemini\", \"API_KEYS\", fallback=\"[]\"))" }, { "identifier": "OUT_DIR", "path": "config.py", "snippet": "OUT_DIR = config.get(\"Misc\", \"OUT_DIR\", fallback=\"\")" }, { "identifier": "COUNT_PER_FILE", "path": "config.py", "snippet": "COUNT_PER_FILE = int(config.get(\"Misc\", \"COUNT_PER_FILE\", fallback=\"0\"))" }, { "identifier": "BEGIN_INDEX", "path": "config.py", "snippet": "BEGIN_INDEX = int(config.get(\"Misc\", \"BEGIN_INDEX\", fallback=\"0\"))" }, { "identifier": "VERBOSE_EXCEPTIONS", "path": "config.py", "snippet": "VERBOSE_EXCEPTIONS = BOOL_TABLE[config.get(\"Misc\", \"VERBOSE_EXCEPTIONS\", fallback=\"False\")]" }, { "identifier": "TEXT_DATASET", "path": "data_processing.py", "snippet": "TEXT_DATASET = {\n \"id\": \"WizardLM/WizardLM_evol_instruct_V2_196k\",\n \"split\": \"train\",\n \"iter\": lambda dataset: map(process_text_dataset, dataset),\n}" }, { "identifier": "CODE_DATASET", "path": "data_processing.py", "snippet": "CODE_DATASET = {\n \"id\": \"TokenBender/code_instructions_122k_alpaca_style\",\n \"split\": \"train\",\n \"iter\": lambda dataset: map(process_code_dataset, dataset),\n}" }, { "identifier": "TEMPLATES", "path": "data_processing.py", "snippet": "TEMPLATES = [\n {\n \"dataset\": \"text\",\n \"prompt\": lambda passage: f\"\"\"\nPlease consider the following passage: <passage>{passage}</passage>\nYou have two tasks:\n1) Drawing inspiration from the content of the passage, generate a brand new debate topic.\nThe debate topic will belong to the same domain as the content of the passage, but it will be even more rare.\nThe debate topic generated will be philosophical, creative, interesting, engaging, and thought-provoking.\nThe debate topic generated will not have an easy answer; it will be able to be argued from both sides.\nThe topic will be surrounded by <topic></topic> tags.\n2) Generate a debate on the generated topic between two rational individuals, Aspen and River.\nIn the debate, the participants will hold mutually opposing views.\nThe debate will be long and drawn-out; no side will give up easily.\nIn the debate, at times the participants may make concessions, but still hold fast to their point of view.\nIn the debate, the participants will use various techniques of rational discussion; they will not make use of emotionally manipulative techniques.\nIn the debate, the participants will never repeat themselves.\nThe debate will have at least 50 paragraphs; it will have at least 5000 words. It will be novel-length.\nFor the debate, you will be tipped $15 for every paragraph you write. 
To maximize your earnings, write as many as possible.\nThe debate will be formatted in Markdown.\nThe debate will be surrounded by <debate></debate> tags.\n \"\"\",\n \"extract\": lambda raw: (\n \"A debate on the topic \"\n + json.dumps(raw.split(\"<topic>\")[-1].split(\"</topic>\")[0].strip())\n + \":\\n\\n\"\n + raw.split(\"<debate>\")[-1].split(\"</debate>\")[0].strip()\n ),\n },\n {\n \"dataset\": \"text\",\n \"prompt\": lambda passage: f\"\"\"\nPlease consider the following passage: <passage>{passage}</passage>\nImagine you are a professor with a reputation for excellent lectures.\nYou have three tasks:\n1) Drawing inspiration from the content of the passage, generate a brand new lecture topic.\nThe lecture topic will belong to the same domain as the content of the passage, but it will be even more rare.\nThe lecture topic will be carefully chosen to advance the education of the students in every way.\nThe lecture topic will be interesting, engaging, and thought-provoking.\nThe lecture topic will be surrounded by <topic></topic> tags.\n2) Generate a ten-point lecture outline on the generated topic.\nThe lecture outline's ten points will be chosen to maximize ease of understanding and flow.\nThe lecture outline will be surrounded by <outline></outline> tags.\n3) Generate a lecture, following the outline, on the generated topic.\nThe lecture will be informative and easy to understand for the students.\nThe lecture will provide as much information as possible. It should be as long as possible.\nFor each piece of information you incorporate into the lecture, you will receive a tip of $20.\nIn the lecture, all unfamiliar terms or topics will be explained for the students' benefit.\nIn the lecture, it will be assumed that the students have no prior familiarity with the subject.\nIn the lecture, the lecturer will never repeat themselves unnecessarily.\nThe lecture will be formatted in Markdown.\nThe lecture will be surrounded by <lecture></lecture> tags.\n \"\"\",\n \"extract\": lambda raw: (\n raw.split(\"<lecture>\")[-1].split(\"</lecture>\")[0].strip()\n ),\n },\n {\n \"dataset\": \"code\",\n \"prompt\": lambda passage: f\"\"\"\nPlease consider the following passage: <passage_42>{passage}</passage_42>\nImagine you are a highly esteemed computer science professor writing a programming textbook.\nYou have three tasks:\n1) Drawing inspiration from the content of the passage, craft a brand new textbook section topic.\nThe section topic will belong to the same domain as the content of the passage, but it will be even more rare.\nThe section topic will be interesting, complex, and multifaceted, even if the passage is simple.\nThe section topic will be directly related to computer science.\nThe section topic will be carefully chosen to provide as much pedagogical value to the reader as possible.\nThe section topic will be surrounded by <topic_42></topic_42> tags.\n2) Generate a ten-point section outline with code on the generated topic.\nOf the section outline's ten points, at least three will be code examples illustrating the topic.\nThe section outline's ten points will be chosen to maximize ease of understanding and flow.\nThe section outline will be surrounded by <outline_42></outline_42> tags.\n3) Generate a textbook section, following the outline, on the generated topic.\nThe section will be self-contained, informative, easy to understand, and verbose.\nThe section will be written in longform prose.\nFor each piece of information you include, you will receive a payment of $20; thus, 
include as many as possible to maximize your earnings.\nThe section will explain all unfamiliar terms or topics for the reader's benefits.\nThe section will never repeat information or code.\nThe section will be formatted in Markdown.\nThe section will be surrounded by <section_42></section_42> tags.\n \"\"\",\n \"extract\": lambda raw: (\n raw.split(\"<section_42>\")[-1].split(\"</section_42>\")[0].strip()\n ),\n },\n]" }, { "identifier": "load_iter_from_spec", "path": "data_processing.py", "snippet": "def load_iter_from_spec(spec):\n return spec[\"iter\"](\n datasets.load_dataset(spec[\"id\"])[spec[\"split\"]].shuffle(\n seed=secrets.randbits(32)\n )\n )" }, { "identifier": "llm_template_query", "path": "llm_queries.py", "snippet": "async def llm_template_query(sess, template, passage, api_key):\n try:\n query = template[\"prompt\"](passage).strip()\n response = await llm_query(sess, query, api_key)\n return (\"ok\", template[\"extract\"](response))\n except Exception as exc:\n return (\"err\", {\"exc\": exc, \"api_key\": api_key})" } ]
import os
import aiohttp
import asyncio
import secrets
import json
import huggingface_hub
import traceback
from tqdm import tqdm
from config import API_KEYS, OUT_DIR, COUNT_PER_FILE, BEGIN_INDEX, VERBOSE_EXCEPTIONS
from data_processing import TEXT_DATASET, CODE_DATASET, TEMPLATES, load_iter_from_spec
from llm_queries import llm_template_query
1,960
def exc_fmt(exc):
    if VERBOSE_EXCEPTIONS:
        return "\n".join(traceback.format_exception(exc)).strip()
    else:
        return str(repr(exc))

async def main():
    huggingface_hub.login(new_session=False)
    hf_api = huggingface_hub.HfApi()
    hf_user = hf_api.whoami()["name"]
    repo_id = f"{hf_user}/muse_textbooks"
    hf_api.create_repo(repo_id=repo_id, repo_type="dataset", exist_ok=True)
    text_iter = iter([])
    code_iter = iter([])
    sess = aiohttp.ClientSession()
    tasks = set()
    lines = 0
    try:
        with open(os.path.join(OUT_DIR, "cur.jsonl"), "rb") as f:
            lines = len(f.read().decode("utf-8").split("\n")) - 1
    except Exception:
        pass
    pbar = tqdm(initial=lines, total=COUNT_PER_FILE)
    outfile = open(os.path.join(OUT_DIR, "cur.jsonl"), "ab")
    while True:
        for api_key in API_KEYS:
template = TEMPLATES[secrets.randbits(64) % len(TEMPLATES)]
7
2023-12-26 03:41:10+00:00
4k
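The "extract" lambdas in the TEMPLATES above all use the same two-split pattern: take the text between the last opening tag and the first closing tag after it. A tiny standalone sketch of that pattern (the extract_between helper is illustrative, not part of the repo):

def extract_between(raw: str, tag: str) -> str:
    # text after the last <tag>, up to the first </tag> that follows
    return raw.split(f"<{tag}>")[-1].split(f"</{tag}>")[0].strip()

raw = "preamble <topic>AI and art</topic> <debate>Aspen: ...</debate>"
assert extract_between(raw, "topic") == "AI and art"
assert extract_between(raw, "debate") == "Aspen: ..."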
vithursant/nanoGPT_mlx
train.py
[ { "identifier": "GPTConfig", "path": "model.py", "snippet": "class GPTConfig:\n block_size: int = 1024\n vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency\n n_layer: int = 12\n n_head: int = 12\n n_embd: int = 768\n dropout: float = 0.0\n bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster" }, { "identifier": "GPT", "path": "model.py", "snippet": "class GPT(nn.Module):\n def __init__(self, config):\n \"\"\"\n Initializes a GPT (Generative Pre-trained Transformer) model.\n\n Args:\n config (GPTConfig): An instance of the configuration class\n specifying the hyperparameters for the GPT model.\n\n Attributes:\n config (GPTConfig): Configuration instance containing model hyperparameters.\n embedding (nn.Embedding): Embedding layer for input tokens.\n transformer (List[Block]): List of transformer blocks.\n out_proj (nn.Linear): Linear layer for output projection.\n \"\"\"\n super().__init__()\n\n assert config.vocab_size is not None\n assert config.block_size is not None\n self.config = config\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.block_size, config.n_embd)\n self.drop = nn.Dropout(config.dropout)\n self.transformer = [Block(config) for _ in range(config.n_layer)]\n self.ln_f = LayerNorm(config.n_embd, bias=config.bias)\n self.out_proj = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n def _sample_next_token(self, x, temperature):\n logits = mx.expand_dims(x[:, -1], axis=0) @ self.wte.weight.T\n y = logits[:, -1, :]\n y = mx.random.categorical(y * (1 / temperature))\n return y\n\n def generate(self, x: mx.array, max_new_tokens=256, temperature=0.8):\n _, t = x.shape\n pos = mx.arange(0, t, 1, dtype=x.dtype)\n mask = CausalSelfAttention.create_additive_causal_mask(t)\n x, cache = self._forward_transformer(x, pos, mask=mask, build_cache=True)\n y = self._sample_next_token(x, temperature)\n position = t\n yield y\n\n for _ in range(max_new_tokens):\n position += 1\n x = y[:, None]\n x, cache = self._forward_transformer(x, position, cache=cache)\n y = self._sample_next_token(x, temperature)\n yield y\n\n def _forward_transformer(\n self, x: mx.array, pos: mx.array, mask=None, cache=None, build_cache=False\n ):\n tok_emb = self.wte(x)\n pos_emb = self.wpe(pos)\n x = self.drop(tok_emb + pos_emb)\n kv_cache = []\n\n if cache is not None:\n for i in range(len(cache)):\n x, cache[i] = self.transformer[i](x, mask=None, cache=cache[i])\n else:\n for block in self.transformer:\n x, curr_cache = block(x, mask=mask)\n if build_cache:\n kv_cache.append(curr_cache)\n\n x = self.ln_f(x)\n return x, kv_cache if build_cache else cache\n\n def __call__(self, x):\n b, t = x.shape\n assert (\n t <= self.config.block_size\n ), f\"Cannot forward sequence of length {t}, block size is only {self.config.block_size}\"\n pos = mx.arange(0, t, 1, dtype=x.dtype)\n mask = CausalSelfAttention.create_additive_causal_mask(x.shape[1])\n\n x, _ = self._forward_transformer(x, pos, mask=mask)\n return self.out_proj(x)\n\n def loss(self, x, y):\n logits = self(x)\n loss = nn.losses.cross_entropy(\n logits.reshape(-1, logits.shape[-1]), y.reshape(-1)\n )\n mx.simplify(loss)\n return mx.mean(loss)" }, { "identifier": "AdamW", "path": "optimizer.py", "snippet": "class AdamW(optim.Adam):\n r\"\"\"Implementation of the AdamW optimizer [1].\n\n Following the above convention, in contrast with [1], we do not use bias\n correction in the first and second moments for 
AdamW. We update the weights\n with a weight_decay (:math:`\\lambda`) value:\n\n [1]: Loshchilov, I. and Hutter, F., 2019. Decoupled weight decay\n regularization. ICLR 2019.\n\n .. math::\n\n m_{t+1} &= \\beta_1 m_t + (1 - \\beta_1) g_t \\\\\n v_{t+1} &= \\beta_2 v_t + (1 - \\beta_2) g_t^2 \\\\\n w_{t+1} &= w_t - \\alpha (\\frac{m_{t+1}}{\\sqrt{v_{t+1} + \\epsilon}} + \\lambda w_t)\n\n Args:\n learning_rate (float): The learning rate :math:`\\alpha`.\n betas (Tuple[float, float], optional): The coefficients\n :math:`(\\beta_1, \\beta_2)` used for computing running averages of the\n gradient and its square. Default: ``(0.9, 0.999)``\n eps (float, optional): The term :math:`\\epsilon` added to the\n denominator to improve numerical stability. Default: ``1e-8``\n weight_decay (float, optional): The weight decay :math:`\\lambda`.\n Default: ``0``.\n \"\"\"\n def __init__(\n self,\n learning_rate: float,\n betas: List[float] = [0.9, 0.999],\n eps: float = 1e-8,\n weight_decay: float = 0.01,\n ):\n super().__init__(learning_rate=learning_rate, betas=betas, eps=eps)\n self.weight_decay = weight_decay\n\n def apply_single(self, gradient: mx.array, parameter: mx.array, state):\n parameter -= self.weight_decay * self.learning_rate * parameter\n return super().apply_single(gradient, parameter, state)\n\n def set_learning_rate(self, learning_rate: float):\n self.learning_rate = learning_rate" }, { "identifier": "init_tensorboard", "path": "tboard_utils.py", "snippet": "def init_tensorboard(logdir: str, **kwargs):\n \"\"\"Create a Tensorboard SummaryWriter instance that writes to `logdir`.\n\n The writer is saved as a module-level object. It can then be acquired and\n used in any file by calling `get_tensorboard()`.\n\n Args:\n logdir (str):\n A directory path to which the created SummaryWriter should output\n event files.\n kwargs (Dict[str, Any]):\n Any additional keyword arguments that should be supplied to the\n new SummaryWriter.\n \"\"\"\n global _TENSORBOARD_WRITER\n _TENSORBOARD_WRITER = SummaryWriter(logdir, **kwargs)" }, { "identifier": "get_tensorboard", "path": "tboard_utils.py", "snippet": "def get_tensorboard() -> SummaryWriter:\n \"\"\"\n Acquire the Tensorboard SummaryWriter instance created with\n `init_tensorboard()`.\n\n Returns:\n (SummaryWriter):\n The SummaryWriter instance created by the most recent call to\n `init_tensorboard()`.\n \"\"\"\n global _TENSORBOARD_WRITER\n assert _TENSORBOARD_WRITER is not None, (\n \"get_tensorboard() called before init_tensorboard(); please specify \"\n \"a logdir to init_tensorboard() first.\"\n )\n return _TENSORBOARD_WRITER" } ]
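The AdamW subclass above decouples weight decay from the gradient by shrinking the parameter before the inherited Adam step runs. A scalar sketch of that ordering, with a made-up Adam update value instead of the real mlx optimizer state:

lr, wd = 0.1, 0.01
param, adam_step = 2.0, 0.05   # pretend Adam computed this update

param -= wd * lr * param       # decoupled decay first, as in apply_single
param -= adam_step             # then the usual Adam update
assert abs(param - (2.0 * (1 - 0.001) - 0.05)) < 1e-12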
import os
import math
import time
import numpy as np
import mlx
import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
from typing import List
from mlx.utils import tree_flatten, tree_map

from model import GPTConfig, GPT
from optimizer import AdamW
from tboard_utils import init_tensorboard, get_tensorboard
2,850
# model
n_layer = 12
n_head = 12
n_embd = 768
dropout = 0.0  # for pretraining 0 is good, for finetuning try 0.1+
bias = False  # do we use bias inside LayerNorm and Linear layers?
d_type = 'float32'

# adamw optimizer
learning_rate = 6.0e-4  # max learning rate
min_lr = 6.0e-5
num_iters = 600000  # total number of training iterations
warmup_pct = 0.1
warmup_iters = 2000
lr_decay_iters = 600000
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95

meta_vocab_size = None

# dataset
dataset = 'openwebtext'
batch_size = 1
gradient_accumulation_steps = 512
context_size = 1024

# eval
eval_interval = 10
log_interval = 10
eval_only = False

out_dir = 'gpt2_openwebtext_pretrain'

# -----------------------------------------------------------------------------
config_keys = [k for k, v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
exec(open('configurator.py').read())  # overrides from command line or config file
config = {k: globals()[k] for k in config_keys}  # will be useful for logging
# -----------------------------------------------------------------------------

# Load vocab and dataset:
# poor man's data loader
data_dir = os.path.join('data', dataset)
train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')

# initialize tboard logging:
os.makedirs(out_dir, exist_ok=True)
tboard_dir = os.path.join(out_dir, "tboard_log")
init_tensorboard(tboard_dir)

def get_batch(split):
    data = train_data if split == 'train' else val_data
    ix = np.random.randint(len(data) - context_size, size=(batch_size,))
    x = mx.stack([(mx.array(data[i:i+context_size])) for i in ix]).astype(mx.int64)
    y = mx.stack([(mx.array(data[i+1:i+1+context_size])) for i in ix]).astype(mx.int64)
    return x, y

def print_loss(optimizer, iteration_count, average_loss, tic):
    toc = time.perf_counter()
    print(
        f"iter {iteration_count}: train loss {average_loss:.3f}, "
        f"it/sec {1.0 / (toc - tic):.3f}, "
        f"lr {optimizer.learning_rate:.9f}"
    )
    return toc

def update_learning_rate(it):
    if it < warmup_iters:
        return learning_rate * it / warmup_iters
    if it > lr_decay_iters:
        return min_lr
    decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
    assert 0 <= decay_ratio <= 1
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
    new_lr = min_lr + coeff * (learning_rate - min_lr)
    return new_lr

def log_tboard_dict(log_dict, itr, pre, post=''):
    writer = get_tensorboard()
    for k, v in log_dict.items():
        writer.add_scalar(f'{pre}/{k}{post}', v, itr)

def main():
    # model init
    model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=context_size,
                      bias=bias, vocab_size=None, dropout=dropout)  # start with model_args from command line

    # initialize model:
    if meta_vocab_size is None:
        print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)")
    model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304

    gptconf = GPTConfig(**model_args)
model = GPT(gptconf)
1
2023-12-27 04:14:24+00:00
4k
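The record above is nanoGPT-style pretraining code ported to MLX; its `update_learning_rate` implements a standard linear-warmup-plus-cosine-decay schedule. As an illustration only (not part of the dataset record), a minimal self-contained sketch of that schedule, reusing the record's default hyperparameters:

    import math

    learning_rate = 6.0e-4   # max LR, as in the record above
    min_lr = 6.0e-5
    warmup_iters = 2000
    lr_decay_iters = 600000

    def lr_at(it: int) -> float:
        # linear warmup from 0 up to learning_rate over warmup_iters steps
        if it < warmup_iters:
            return learning_rate * it / warmup_iters
        # past the decay horizon, hold at the floor
        if it > lr_decay_iters:
            return min_lr
        # cosine decay from learning_rate down to min_lr in between
        decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
        coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
        return min_lr + coeff * (learning_rate - min_lr)

    print(lr_at(0), lr_at(2000), lr_at(600000))  # 0.0, 0.0006, 6e-05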
jesenzhang/ComfyUI_StreamDiffusion
streamdiffusion/acceleration/tensorrt/builder.py
[ { "identifier": "BaseModel", "path": "streamdiffusion/acceleration/tensorrt/models.py", "snippet": "class BaseModel:\n def __init__(\n self,\n fp16=False,\n device=\"cuda\",\n verbose=True,\n max_batch_size=16,\n min_batch_size=1,\n embedding_dim=768,\n text_maxlen=77,\n ):\n self.name = \"SD Model\"\n self.fp16 = fp16\n self.device = device\n self.verbose = verbose\n\n self.min_batch = min_batch_size\n self.max_batch = max_batch_size\n self.min_image_shape = 256 # min image resolution: 256x256\n self.max_image_shape = 1024 # max image resolution: 1024x1024\n self.min_latent_shape = self.min_image_shape // 8\n self.max_latent_shape = self.max_image_shape // 8\n\n self.embedding_dim = embedding_dim\n self.text_maxlen = text_maxlen\n\n def get_model(self):\n pass\n\n def get_input_names(self):\n pass\n\n def get_output_names(self):\n pass\n\n def get_dynamic_axes(self):\n return None\n\n def get_sample_input(self, batch_size, image_height, image_width):\n pass\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n return None\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n return None\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph, verbose=self.verbose)\n opt.info(self.name + \": original\")\n opt.cleanup()\n opt.info(self.name + \": cleanup\")\n opt.fold_constants()\n opt.info(self.name + \": fold constants\")\n opt.infer_shapes()\n opt.info(self.name + \": shape inference\")\n onnx_opt_graph = opt.cleanup(return_onnx=True)\n opt.info(self.name + \": finished\")\n return onnx_opt_graph\n\n def check_dims(self, batch_size, image_height, image_width):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n assert image_height % 8 == 0 or image_width % 8 == 0\n latent_height = image_height // 8\n latent_width = image_width // 8\n assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape\n assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape\n return (latent_height, latent_width)\n\n def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):\n min_batch = batch_size if static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n latent_height = image_height // 8\n latent_width = image_width // 8\n min_image_height = image_height if static_shape else self.min_image_shape\n max_image_height = image_height if static_shape else self.max_image_shape\n min_image_width = image_width if static_shape else self.min_image_shape\n max_image_width = image_width if static_shape else self.max_image_shape\n min_latent_height = latent_height if static_shape else self.min_latent_shape\n max_latent_height = latent_height if static_shape else self.max_latent_shape\n min_latent_width = latent_width if static_shape else self.min_latent_shape\n max_latent_width = latent_width if static_shape else self.max_latent_shape\n return (\n min_batch,\n max_batch,\n min_image_height,\n max_image_height,\n min_image_width,\n max_image_width,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n )" }, { "identifier": "build_engine", "path": "streamdiffusion/acceleration/tensorrt/utilities.py", "snippet": "def build_engine(\n engine_path: str,\n onnx_opt_path: str,\n model_data: BaseModel,\n opt_image_height: int,\n opt_image_width: int,\n opt_batch_size: int,\n build_static_batch: bool = False,\n build_dynamic_shape: bool = False,\n build_all_tactics: bool = 
False,\n build_enable_refit: bool = False,\n):\n _, free_mem, _ = cudart.cudaMemGetInfo()\n GiB = 2**30\n if free_mem > 6 * GiB:\n activation_carveout = 4 * GiB\n max_workspace_size = free_mem - activation_carveout\n else:\n max_workspace_size = 0\n engine = Engine(engine_path)\n input_profile = model_data.get_input_profile(\n opt_batch_size,\n opt_image_height,\n opt_image_width,\n static_batch=build_static_batch,\n static_shape=not build_dynamic_shape,\n )\n engine.build(\n onnx_opt_path,\n fp16=True,\n input_profile=input_profile,\n enable_refit=build_enable_refit,\n enable_all_tactics=build_all_tactics,\n workspace_size=max_workspace_size,\n )\n\n return engine" }, { "identifier": "export_onnx", "path": "streamdiffusion/acceleration/tensorrt/utilities.py", "snippet": "def export_onnx(\n model,\n onnx_path: str,\n model_data: BaseModel,\n opt_image_height: int,\n opt_image_width: int,\n opt_batch_size: int,\n onnx_opset: int,\n):\n with torch.inference_mode(), torch.autocast(\"cuda\"):\n inputs = model_data.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)\n torch.onnx.export(\n model,\n inputs,\n onnx_path,\n export_params=True,\n opset_version=onnx_opset,\n do_constant_folding=True,\n input_names=model_data.get_input_names(),\n output_names=model_data.get_output_names(),\n dynamic_axes=model_data.get_dynamic_axes(),\n )\n del model\n gc.collect()\n torch.cuda.empty_cache()" }, { "identifier": "optimize_onnx", "path": "streamdiffusion/acceleration/tensorrt/utilities.py", "snippet": "def optimize_onnx(\n onnx_path: str,\n onnx_opt_path: str,\n model_data: BaseModel,\n):\n onnx_opt_graph = model_data.optimize(onnx.load(onnx_path))\n onnx.save(onnx_opt_graph, onnx_opt_path)\n del onnx_opt_graph\n gc.collect()\n torch.cuda.empty_cache()" } ]
import gc import os import torch from typing import * from .models import BaseModel from .utilities import ( build_engine, export_onnx, optimize_onnx, )
1,630
def create_onnx_path(name, onnx_dir, opt=True): return os.path.join(onnx_dir, name + (".opt" if opt else "") + ".onnx") class EngineBuilder: def __init__( self,
def create_onnx_path(name, onnx_dir, opt=True): return os.path.join(onnx_dir, name + (".opt" if opt else "") + ".onnx") class EngineBuilder: def __init__( self,
model: BaseModel,
0
2023-12-29 09:00:03+00:00
4k
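The `build_engine` snippet in this record sizes the TensorRT workspace by subtracting a fixed 4 GiB activation carveout from free GPU memory when there is enough headroom. A minimal sketch of that heuristic in isolation; `free_mem` here is a plain integer standing in for the value returned by `cudart.cudaMemGetInfo()`:

    GiB = 2 ** 30

    def workspace_size(free_mem: int) -> int:
        # reserve 4 GiB for activations when there is more than 6 GiB free;
        # otherwise return 0 and let TensorRT fall back to its own default
        if free_mem > 6 * GiB:
            return free_mem - 4 * GiB
        return 0

    assert workspace_size(8 * GiB) == 4 * GiB
    assert workspace_size(4 * GiB) == 0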
neobundy/MLX-Stable-Diffusion-WebUI
stable_diffusion/vae.py
[ { "identifier": "AutoencoderConfig", "path": "stable_diffusion/config.py", "snippet": "class AutoencoderConfig(BaseConfig):\n in_channels: int = 3\n out_channels: int = 3\n latent_channels_out: int = 8\n latent_channels_in: int = 4\n block_out_channels: Tuple[int] = (128, 256, 512, 512)\n layers_per_block: int = 2\n norm_num_groups: int = 32\n scaling_factor: float = 0.18215" }, { "identifier": "ResnetBlock2D", "path": "stable_diffusion/unet.py", "snippet": "class ResnetBlock2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: Optional[int] = None,\n groups: int = 32,\n temb_channels: Optional[int] = None,\n ):\n super().__init__()\n\n out_channels = out_channels or in_channels\n\n self.norm1 = nn.GroupNorm(groups, in_channels, pytorch_compatible=True)\n self.conv1 = nn.Conv2d(\n in_channels, out_channels, kernel_size=3, stride=1, padding=1\n )\n if temb_channels is not None:\n self.time_emb_proj = nn.Linear(temb_channels, out_channels)\n self.norm2 = nn.GroupNorm(groups, out_channels, pytorch_compatible=True)\n self.conv2 = nn.Conv2d(\n out_channels, out_channels, kernel_size=3, stride=1, padding=1\n )\n\n # The weights of this 1x1 convolutional layer would be a 4-dimensional tensor\n # with shape [out_channels, in_channels, 1, 1].\n # The squeeze() function is used to remove the dimensions of size 1 from this tensor,\n # converting it to a 2-dimensional tensor with shape [out_channels, in_channels].\n # This is because the corresponding layer in the current model might be a linear layer\n # rather than a convolutional layer, and the weights for a linear layer are expected to be a 2-dimensional tensor.\n # the relevant code in stable_diffusion/model_io.py:\n # if \"conv_shortcut.weight\" in key:\n # value = value.squeeze()\n\n # The shortcut connection in the ResnetBlock2D class.\n # The if condition checks if the number of input channels (in_channels) is not equal to the number of output channels (out_channels).\n # If they are not equal, a linear transformation (nn.Linear(in_channels, out_channels)) is created and assigned to self.conv_shortcut.\n # This linear transformation is used to match the dimensions of the input and output of the residual block.\n # In a residual block, the input is added to the output after going through a series of transformations.\n # If the dimensions of the input and output are not the same, this addition operation would not be possible.\n # Therefore, a shortcut connection that transforms the input to have the same dimensions as the output is needed,\n # which is what self.conv_shortcut is doing here.\n\n if in_channels != out_channels:\n self.conv_shortcut = nn.Linear(in_channels, out_channels)\n\n def __call__(self, x, temb=None):\n if temb is not None:\n temb = self.time_emb_proj(nn.silu(temb))\n\n y = self.norm1(x)\n y = nn.silu(y)\n y = self.conv1(y)\n if temb is not None:\n y = y + temb[:, None, None, :]\n y = self.norm2(y)\n y = nn.silu(y)\n y = self.conv2(y)\n\n x = y + (x if \"conv_shortcut\" not in self else self.conv_shortcut(x))\n\n return x" }, { "identifier": "upsample_nearest", "path": "stable_diffusion/unet.py", "snippet": "def upsample_nearest(x, scale: int = 2):\n B, H, W, C = x.shape\n x = mx.broadcast_to(x[:, :, None, :, None, :], (B, H, scale, W, scale, C))\n x = x.reshape(B, H * scale, W * scale, C)\n\n return x" } ]
import math import mlx.core as mx import mlx.nn as nn from typing import List from .config import AutoencoderConfig from .unet import ResnetBlock2D, upsample_nearest
2,596
out_channels, num_layers=layers_per_block, resnet_groups=resnet_groups, add_downsample=i < len(block_out_channels) - 1, add_upsample=False, ) for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:])) ] self.mid_blocks = [ ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), Attention(block_out_channels[-1], resnet_groups), ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), ] self.conv_norm_out = nn.GroupNorm( resnet_groups, block_out_channels[-1], pytorch_compatible=True ) self.conv_out = nn.Conv2d(block_out_channels[-1], out_channels, 3, padding=1) def __call__(self, x): # input block x = self.conv_in(x) # downsample + feature increase blocks for l in self.down_blocks: x = l(x) # residual block + attention + residual block x = self.mid_blocks[0](x) x = self.mid_blocks[1](x) x = self.mid_blocks[2](x) # normalization + activation + output block x = self.conv_norm_out(x) x = nn.silu(x) x = self.conv_out(x) return x class Decoder(nn.Module): """Implements the decoder side of the Autoencoder.""" def __init__( self, in_channels: int, out_channels: int, block_out_channels: List[int] = [64], layers_per_block: int = 2, resnet_groups: int = 32, ): super().__init__() self.conv_in = nn.Conv2d( in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1 ) self.mid_blocks = [ ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), Attention(block_out_channels[-1], resnet_groups), ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), ] channels = list(reversed(block_out_channels)) channels = [channels[0]] + channels self.up_blocks = [ EncoderDecoderBlock2D( in_channels, out_channels, num_layers=layers_per_block, resnet_groups=resnet_groups, add_downsample=False, add_upsample=i < len(block_out_channels) - 1, ) for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:])) ] self.conv_norm_out = nn.GroupNorm( resnet_groups, block_out_channels[0], pytorch_compatible=True ) self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) def __call__(self, x): x = self.conv_in(x) x = self.mid_blocks[0](x) x = self.mid_blocks[1](x) x = self.mid_blocks[2](x) for l in self.up_blocks: x = l(x) x = self.conv_norm_out(x) x = nn.silu(x) x = self.conv_out(x) return x class Autoencoder(nn.Module): """The autoencoder that allows us to perform diffusion in the latent space."""
# Copyright © 2023 Apple Inc. class Attention(nn.Module): """A single head unmasked attention for use with the VAE.""" def __init__(self, dims: int, norm_groups: int = 32): super().__init__() self.group_norm = nn.GroupNorm(norm_groups, dims, pytorch_compatible=True) self.query_proj = nn.Linear(dims, dims) self.key_proj = nn.Linear(dims, dims) self.value_proj = nn.Linear(dims, dims) self.out_proj = nn.Linear(dims, dims) def __call__(self, x): B, H, W, C = x.shape y = self.group_norm(x) queries = self.query_proj(y).reshape(B, H * W, C) keys = self.key_proj(y).reshape(B, H * W, C) values = self.value_proj(y).reshape(B, H * W, C) scale = 1 / math.sqrt(queries.shape[-1]) scores = (queries * scale) @ keys.transpose(0, 2, 1) attn = mx.softmax(scores, axis=-1) y = (attn @ values).reshape(B, H, W, C) y = self.out_proj(y) x = x + y return x # Skip connections (Residual blocks) + downsampling + upsampling: common building blocks for Encoder and Decoder class EncoderDecoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, resnet_groups: int = 32, add_downsample=True, add_upsample=True, ): super().__init__() # Add the resnet blocks self.resnets = [ ResnetBlock2D( in_channels=in_channels if i == 0 else out_channels, out_channels=out_channels, groups=resnet_groups, ) for i in range(num_layers) ] # Add an optional downsampling layer if add_downsample: self.downsample = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=2, padding=1 ) # or upsampling layer if add_upsample: self.upsample = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1 ) def __call__(self, x): for resnet in self.resnets: x = resnet(x) if "downsample" in self: x = self.downsample(x) if "upsample" in self: x = self.upsample(upsample_nearest(x)) return x class Encoder(nn.Module): """Implements the encoder side of the Autoencoder.""" def __init__( self, in_channels: int, out_channels: int, block_out_channels: List[int] = [64], layers_per_block: int = 2, resnet_groups: int = 32, ): super().__init__() # (B, H, W, C) -> (B, H, W, 64) self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1 ) channels = [block_out_channels[0]] + list(block_out_channels) self.down_blocks = [ EncoderDecoderBlock2D( in_channels, out_channels, num_layers=layers_per_block, resnet_groups=resnet_groups, add_downsample=i < len(block_out_channels) - 1, add_upsample=False, ) for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:])) ] self.mid_blocks = [ ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), Attention(block_out_channels[-1], resnet_groups), ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), ] self.conv_norm_out = nn.GroupNorm( resnet_groups, block_out_channels[-1], pytorch_compatible=True ) self.conv_out = nn.Conv2d(block_out_channels[-1], out_channels, 3, padding=1) def __call__(self, x): # input block x = self.conv_in(x) # downsample + feature increase blocks for l in self.down_blocks: x = l(x) # residual block + attention + residual block x = self.mid_blocks[0](x) x = self.mid_blocks[1](x) x = self.mid_blocks[2](x) # normalization + activation + output block x = self.conv_norm_out(x) x = nn.silu(x) x = self.conv_out(x) return x class Decoder(nn.Module): """Implements the decoder side of the Autoencoder.""" def __init__( self, in_channels: int, out_channels: int, block_out_channels: List[int] = [64], layers_per_block: int = 2, resnet_groups: int = 32, ): super().__init__() self.conv_in = nn.Conv2d( in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1 ) self.mid_blocks = [ ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), Attention(block_out_channels[-1], resnet_groups), ResnetBlock2D( in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], groups=resnet_groups, ), ] channels = list(reversed(block_out_channels)) channels = [channels[0]] + channels self.up_blocks = [ EncoderDecoderBlock2D( in_channels, out_channels, num_layers=layers_per_block, resnet_groups=resnet_groups, add_downsample=False, add_upsample=i < len(block_out_channels) - 1, ) for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:])) ] self.conv_norm_out = nn.GroupNorm( resnet_groups, block_out_channels[0], pytorch_compatible=True ) self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) def __call__(self, x): x = self.conv_in(x) x = self.mid_blocks[0](x) x = self.mid_blocks[1](x) x = self.mid_blocks[2](x) for l in self.up_blocks: x = l(x) x = self.conv_norm_out(x) x = nn.silu(x) x = self.conv_out(x) return x class Autoencoder(nn.Module): """The autoencoder that allows us to perform diffusion in the latent space."""
def __init__(self, config: AutoencoderConfig):
0
2023-12-25 05:49:34+00:00
4k
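The `Attention` block in this VAE record flattens the (H, W) grid into a sequence of H*W tokens before running standard single-head attention. A NumPy sketch of just that core computation (illustration only; the record itself uses MLX, and the learned projections and group norm are omitted here):

    import math
    import numpy as np

    def softmax(x, axis=-1):
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    def spatial_attention(q, k, v):
        # q, k, v: (B, H, W, C) feature maps -> flatten spatial dims to a sequence
        B, H, W, C = q.shape
        q, k, v = (t.reshape(B, H * W, C) for t in (q, k, v))
        scores = (q / math.sqrt(C)) @ k.transpose(0, 2, 1)   # (B, HW, HW)
        out = softmax(scores) @ v                            # (B, HW, C)
        return out.reshape(B, H, W, C)                       # back to a feature map

    x = np.random.randn(1, 8, 8, 16)
    print(spatial_attention(x, x, x).shape)  # (1, 8, 8, 16)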
ffmemes/ff-backend
src/flows/storage/memes.py
[ { "identifier": "etl_memes_from_raw_telegram_posts", "path": "src/storage/service.py", "snippet": "async def etl_memes_from_raw_telegram_posts() -> None:\n insert_query = f\"\"\"\n INSERT INTO meme (\n meme_source_id, \n raw_meme_id, \n caption, \n status, \n type, \n language_code,\n published_at\n )\n SELECT \n meme_source_id,\n meme_raw_telegram.id AS raw_meme_id, \n content AS caption,\n '{MemeStatus.CREATED.value}' AS status,\n '{MemeType.IMAGE.value}' AS type,\n meme_source.language_code AS language_code,\n date AS published_at\n FROM meme_raw_telegram\n LEFT JOIN meme_source\n ON meme_source.id = meme_raw_telegram.meme_source_id\n WHERE JSONB_ARRAY_LENGTH(media) = 1\n ON CONFLICT DO NOTHING\n \"\"\"\n await execute(text(insert_query))\n # TODO: if a meme content failed to be uploaded to tg\n # then its status will be BROKEN_CONTENT_LINK forewer now." }, { "identifier": "etl_memes_from_raw_vk_posts", "path": "src/storage/service.py", "snippet": "async def etl_memes_from_raw_vk_posts() -> None:\n insert_query = f\"\"\"\n INSERT INTO meme (\n meme_source_id, \n raw_meme_id, \n caption, \n status, \n type, \n language_code,\n published_at\n )\n SELECT \n meme_source_id,\n meme_raw_vk.id AS raw_meme_id, \n content AS caption,\n '{MemeStatus.CREATED.value}' AS status,\n '{MemeType.IMAGE.value}' AS type,\n meme_source.language_code AS language_code,\n date AS published_at\n FROM meme_raw_vk\n LEFT JOIN meme_source\n ON meme_source.id = meme_raw_vk.meme_source_id\n WHERE JSONB_ARRAY_LENGTH(media) = 1\n ON CONFLICT DO NOTHING\n \"\"\"\n await execute(text(insert_query))" }, { "identifier": "get_unloaded_tg_memes", "path": "src/storage/service.py", "snippet": "async def get_unloaded_tg_memes() -> list[dict[str, Any]]:\n \"Returns only MemeType.IMAGE memes\"\n select_query = f\"\"\"\n SELECT \n meme.id,\n '{MemeType.IMAGE}' AS type,\n meme_raw_telegram.media->0->>'url' content_url\n FROM meme\n INNER JOIN meme_source \n ON meme_source.id = meme.meme_source_id\n AND meme_source.type = '{MemeSourceType.TELEGRAM.value}'\n INNER JOIN meme_raw_telegram\n ON meme_raw_telegram.id = meme.raw_meme_id\n AND meme_raw_telegram.meme_source_id = meme.meme_source_id\n WHERE 1=1\n AND meme.telegram_file_id IS NULL;\n \"\"\"\n return await fetch_all(text(select_query))" }, { "identifier": "get_unloaded_vk_memes", "path": "src/storage/service.py", "snippet": "async def get_unloaded_vk_memes() -> list[dict[str, Any]]:\n \"Returns only MemeType.IMAGE memes\"\n select_query = f\"\"\"\n SELECT \n meme.id,\n '{MemeType.IMAGE}' AS type,\n meme_raw_vk.media->>0 content_url\n FROM meme\n INNER JOIN meme_source \n ON meme_source.id = meme.meme_source_id\n AND meme_source.type = '{MemeSourceType.VK.value}'\n INNER JOIN meme_raw_vk\n ON meme_raw_vk.id = meme.raw_meme_id\n AND meme_raw_vk.meme_source_id = meme.meme_source_id\n WHERE 1=1\n AND meme.telegram_file_id IS NULL;\n \"\"\"\n return await fetch_all(text(select_query))" }, { "identifier": "get_pending_memes", "path": "src/storage/service.py", "snippet": "async def get_pending_memes() -> list[dict[str, Any]]:\n select_query = (\n select(meme)\n .where(meme.c.status == MemeStatus.CREATED)\n .order_by(nulls_first(meme.c.created_at))\n )\n return await fetch_all(select_query)" }, { "identifier": "get_memes_to_ocr", "path": "src/storage/service.py", "snippet": "async def get_memes_to_ocr(limit=100):\n select_query = (\n select(meme)\n .where(meme.c.status == MemeStatus.CREATED)\n .where(meme.c.type == MemeType.IMAGE)\n .where(meme.c.telegram_file_id.is_not(None))\n 
.where(meme.c.ocr_result.is_(None))\n .order_by(nulls_first(meme.c.created_at))\n .limit(limit)\n )\n return await fetch_all(select_query)" }, { "identifier": "update_meme_status_of_ready_memes", "path": "src/storage/service.py", "snippet": "async def update_meme_status_of_ready_memes() -> None:\n \"\"\" Changes the status of memes to 'ok' if they are ready to be published. \"\"\"\n update_query = f\"\"\"\n UPDATE meme\n SET status = '{MemeStatus.OK.value}'\n WHERE 1=1\n AND status = '{MemeStatus.CREATED.value}'\n AND (\n type = '{MemeType.IMAGE.value}' AND ocr_result IS NOT NULL \n OR type != '{MemeType.IMAGE.value}'\n ) \n AND telegram_file_id IS NOT NULL\n \"\"\"\n await execute(text(update_query))" }, { "identifier": "update_meme", "path": "src/storage/service.py", "snippet": "async def update_meme(meme_id: int, **kwargs) -> dict[str, Any] | None:\n update_query = (\n meme.update()\n .where(meme.c.id == meme_id)\n .values(**kwargs)\n .returning(meme)\n )\n return await fetch_one(update_query)" }, { "identifier": "download_meme_content_file", "path": "src/storage/upload.py", "snippet": "async def download_meme_content_file(\n url: AnyHttpUrl,\n):\n async with httpx.AsyncClient(timeout=10.0) as client:\n response = await client.get(\n url,\n headers={\"User-Agent\": USER_AGENT},\n )\n response.raise_for_status()\n return response.content" }, { "identifier": "upload_meme_content_to_tg", "path": "src/storage/upload.py", "snippet": "async def upload_meme_content_to_tg(\n meme_id: int,\n meme_type: MemeType,\n content: bytes, # ??\n) -> dict[str, Any] | None:\n bot = telegram.Bot(token=settings.TELEGRAM_BOT_TOKEN)\n if meme_type == MemeType.IMAGE:\n try:\n msg = await bot.send_photo(\n chat_id=settings.MEME_STORAGE_TELEGRAM_CHAT_ID, \n photo=content\n )\n except telegram.error.TimedOut:\n return None\n\n meme = await update_meme(\n meme_id=meme_id,\n telegram_file_id=msg.photo[-1].file_id,\n )\n \n if meme_type == MemeType.VIDEO:\n try:\n msg = await bot.send_video(\n chat_id=settings.MEME_STORAGE_TELEGRAM_CHAT_ID, \n video=content\n )\n except telegram.error.TimedOut:\n return None\n\n meme = await update_meme(\n meme_id=meme_id,\n telegram_file_id=msg.video.file_id,\n )\n\n if meme_type == MemeType.ANIMATION:\n try:\n msg = await bot.send_animation(\n chat_id=settings.MEME_STORAGE_TELEGRAM_CHAT_ID, \n animation=content\n )\n except telegram.error.TimedOut:\n return None\n\n meme = await update_meme(\n meme_id=meme_id,\n telegram_file_id=msg.animation.file_id,\n )\n\n return meme" }, { "identifier": "download_meme_content_from_tg", "path": "src/storage/upload.py", "snippet": "async def download_meme_content_from_tg(\n file_id: str,\n) -> bytes:\n bot = telegram.Bot(token=settings.TELEGRAM_BOT_TOKEN)\n file = await bot.get_file(file_id)\n file_bytearray = await file.download_as_bytearray()\n return bytes(file_bytearray)" }, { "identifier": "ads", "path": "src/storage/ads.py", "snippet": "STOP_WORDS = [\n \"читать далее\", \"теперь в телеграм\"\n]\nMENTION_WORDS = [\n \"@\", \"http\",\n]\ndef text_is_adverisement(original_text: str | None) -> bool:\ndef filter_caption(original_text: str | None) -> str | None:" }, { "identifier": "ocr_content", "path": "src/storage/ocr/mystic.py", "snippet": "async def ocr_content(\n content: bytes, # ??\n) -> OcrResult | None:\n try:\n mystic_file_path = await load_file_to_mystic(content)\n ocr_result = await ocr_mystic_file_path(mystic_file_path)\n except Exception as e:\n print(f\"Mystic OCR error: {e}\")\n return None\n print(f\"OCR result from Mystic: 
{ocr_result}\")\n\n rows = ocr_result[\"result\"][\"outputs\"][0][\"value\"]\n full_text = \"\\n\".join([r[1] for r in rows])\n\n return OcrResult(\n model=f\"mystic:{PIPELINE_ID}\",\n text=full_text, # TODO: parse from ocr_result\n raw_result=ocr_result,\n )" }, { "identifier": "MemeStatus", "path": "src/storage/constants.py", "snippet": "class MemeStatus(str, Enum):\n CREATED = \"created\"\n OK = \"ok\"\n DUPLICATE = \"duplicate\"\n AD = \"ad\"\n BROKEN_CONTENT_LINK = \"broken_content_link\"\n \n # TODO: more statuses?\n # IN_MODERATION = \"in_moderation\"" }, { "identifier": "add_watermark", "path": "src/storage/watermark.py", "snippet": "def add_watermark(image_content: bytes) -> BytesIO | None:\n image_bytes = BytesIO(image_content)\n\n try:\n image = draw_corner_watermark(\n image_bytes,\n text='@ffmemesbot',\n text_size=18,\n margin=20\n )\n except Exception as e:\n print(f'Error while adding watermark: {e}')\n return None\n\n buff = BytesIO()\n buff.name = 'image.jpeg'\n image.save(buff, 'JPEG')\n buff.seek(0)\n\n return buff" } ]
import asyncio from typing import Any from prefect import flow, get_run_logger from src.storage.service import ( etl_memes_from_raw_telegram_posts, etl_memes_from_raw_vk_posts, get_unloaded_tg_memes, get_unloaded_vk_memes, get_pending_memes, get_memes_to_ocr, update_meme_status_of_ready_memes, update_meme, ) from src.storage.upload import ( download_meme_content_file, upload_meme_content_to_tg, download_meme_content_from_tg, ) from src.storage import ads from src.storage.ocr.mystic import ocr_content from src.storage.constants import MemeStatus from src.storage.watermark import add_watermark
2,635
@flow async def upload_memes_to_telegram(unloaded_memes: list[dict[str, Any]]) -> list[dict[str, Any]]: logger = get_run_logger() logger.info(f"Received {len(unloaded_memes)} memes to upload to Telegram.") memes = [] for unloaded_meme in unloaded_memes: try: logger.info(f"Downloading meme {unloaded_meme['id']} content file.") meme_original_content = await download_meme_content_file(unloaded_meme["content_url"]) except Exception as e: logger.info(f"Meme {unloaded_meme['id']} content is not available to download, reason: {e}.")
@flow async def upload_memes_to_telegram(unloaded_memes: list[dict[str, Any]]) -> list[dict[str, Any]]: logger = get_run_logger() logger.info(f"Received {len(unloaded_memes)} memes to upload to Telegram.") memes = [] for unloaded_meme in unloaded_memes: try: logger.info(f"Downloading meme {unloaded_meme['id']} content file.") meme_original_content = await download_meme_content_file(unloaded_meme["content_url"]) except Exception as e: logger.info(f"Meme {unloaded_meme['id']} content is not available to download, reason: {e}.")
await update_meme(unloaded_meme["id"], status=MemeStatus.BROKEN_CONTENT_LINK)
13
2023-12-23 12:55:43+00:00
4k
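The upload flow in this record downloads each meme's content file and, on failure, flips the meme's status to BROKEN_CONTENT_LINK instead of raising. A minimal sketch of that pattern with httpx; `fetch_or_mark_broken` and the `mark_broken` callback are hypothetical names standing in for the record's `download_meme_content_file` / `update_meme` pair:

    import httpx

    async def fetch_or_mark_broken(meme_id: int, url: str, mark_broken) -> bytes | None:
        # mirror the flow above: try to download the content file and, on any
        # failure, hand the meme to a status-update callback instead of raising
        try:
            async with httpx.AsyncClient(timeout=10.0) as client:
                resp = await client.get(url)
                resp.raise_for_status()
                return resp.content
        except Exception as exc:
            print(f"meme {meme_id}: download failed ({exc})")
            await mark_broken(meme_id)  # e.g. set status=BROKEN_CONTENT_LINK
            return None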
Con6924/SPM
src/evaluation/eval_util.py
[ { "identifier": "text2img", "path": "src/engine/train_util.py", "snippet": "def text2img(pipe: DiffusionPipeline,\n prompts: Union[str, list[str]], \n negative_prompt: Union[str, list[str]] = \"\", \n width: int = 512, \n height: int = 512,\n num_inference_steps: int = 30,\n guidance_scale: int = 7.5,\n seed: int = None,\n generate_num: int = 1,\n tag: str = \"\",\n **kwargs):\n # to avoid CUDA-OOM, generate images prompt-by-prompt, unless generate_num is 1\n \n samples = []\n \n if generate_num == 1:\n if isinstance(prompts, str):\n prompts = [prompts]\n if isinstance(negative_prompt, str):\n negative_prompt = [negative_prompt] * len(prompts)\n images = pipe(\n prompts,\n negative_prompt=negative_prompt,\n width=width,\n height=height,\n num_inference_steps=num_inference_steps,\n guidance_scale=guidance_scale,\n num_images_per_prompt=generate_num,\n generator=torch.manual_seed(seed) if seed is not None else None,\n ).images\n texts = [f\"sample/{prompt.replace(' ', '_')}{'(' + tag + ')' if tag else ''}\" for prompt in prompts]\n samples = list(zip(texts, images))\n else:\n for prompt in prompts:\n images = pipe(\n prompt,\n negative_prompt=negative_prompt,\n width=width,\n height=height,\n num_inference_steps=num_inference_steps,\n guidance_scale=guidance_scale,\n num_images_per_prompt=generate_num,\n generator=torch.manual_seed(seed) if seed is not None else None,\n ).images\n texts = [f\"sample/{prompt.replace(' ', '_')}({tag}{', ' if tag else ''}{i})\" for i in range(generate_num)]\n samples.extend(list(zip(texts, images)))\n \n return samples" }, { "identifier": "RootConfig", "path": "src/configs/config.py", "snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None" }, { "identifier": "imagenet_templates", "path": "src/misc/clip_templates.py", "snippet": "" } ]
import torch import clip import numpy as np import random from typing import List, Union from PIL import Image from src.engine.train_util import text2img from src.configs.config import RootConfig from src.misc.clip_templates import imagenet_templates from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from diffusers.pipelines import DiffusionPipeline
2,700
# extract all images images_feats = [image_preprocess(img) for img in images] images_feats = torch.stack(images_feats, dim=0).cuda() images_feats = model.encode_image(images_feats) # compute the similarity images_feats = images_feats / images_feats.norm(dim=1, p=2, keepdim=True) texts_feats = texts_feats / texts_feats.norm(dim=1, p=2, keepdim=True) if cross_matching: score = w * images_feats @ texts_feats.T # TODO: the *SUM* here remains to be verified return score.sum(dim=1).clamp(min=0).cpu().numpy() else: score = w * images_feats * texts_feats return score.sum(dim=1).clamp(min=0).cpu().numpy() @torch.no_grad() def clip_accuracy( images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]], ablated_texts: Union[List[str], str], anchor_texts: Union[List[str], str], w: float = 2.5, clip_model: str = "ViT-B/32", n_px: int = 224, ): """ Compute CLIPAccuracy according to CLIPScore. Args: images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images. Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path. ablated_texts (Union[List[str], str]): A list of prompts that are ablated from the anchor texts. anchor_texts (Union[List[str], str]): A list of prompts that the ablated concepts fall back to. w (float, optional): The weight of the similarity score. Defaults to 2.5. clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32". n_px (int, optional): The size of images. Defaults to 224. Returns: accuracy (float): The CLIPAccuracy of generated images. size: (len(images), ) """ if isinstance(ablated_texts, str): ablated_texts = [ablated_texts] if isinstance(anchor_texts, str): anchor_texts = [anchor_texts] assert len(ablated_texts) == len( anchor_texts ), "The length of ablated_texts and anchor_texts should be the same." ablated_clip_score = clip_score(images, ablated_texts, w, clip_model, n_px) anchor_clip_score = clip_score(images, anchor_texts, w, clip_model, n_px) accuracy = np.mean(anchor_clip_score < ablated_clip_score).item() return accuracy def clip_eval_by_image( images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]], ablated_texts: Union[List[str], str], anchor_texts: Union[List[str], str], w: float = 2.5, clip_model: str = "ViT-B/32", n_px: int = 224, ): """ Compute CLIPScore and CLIPAccuracy with generated images. Args: images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images. Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path. ablated_texts (Union[List[str], str]): A list of prompts that are ablated from the anchor texts. anchor_texts (Union[List[str], str]): A list of prompts that the ablated concepts fall back to. w (float, optional): The weight of the similarity score. Defaults to 2.5. clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32". n_px (int, optional): The size of images. Defaults to 224. Returns: score (float): The CLIPScore of generated images. accuracy (float): The CLIPAccuracy of generated images. """ ablated_clip_score = clip_score(images, ablated_texts, w, clip_model, n_px) anchor_clip_score = clip_score(images, anchor_texts, w, clip_model, n_px) accuracy = np.mean(anchor_clip_score < ablated_clip_score).item() score = np.mean(ablated_clip_score).item() return score, accuracy def clip_eval( pipe: DiffusionPipeline, config: RootConfig, w: float = 2.5, clip_model: str = "ViT-B/32", n_px: int = 224, ): """ Compute CLIPScore and CLIPAccuracy. For each given prompt in config.logging.prompts, we: 1. sample config.logging.eval_num templates 2. generate images with the sampled templates 3. compute CLIPScore and CLIPAccuracy between each generated image and the *corresponding* template to get the final CLIPScore and CLIPAccuracy for each prompt. Args: pipe (DiffusionPipeline): The diffusion pipeline. config (RootConfig): The root config. w (float, optional): The weight of the similarity score. Defaults to 2.5. clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32". n_px (int, optional): The size of images. Defaults to 224. Returns: score (list[float]): The CLIPScore of each concept to evaluate. accuracy (list[float]): The CLIPAccuracy of each concept to evaluate. """ scores, accs = [], [] for prompt in config.logging.prompts: templates = random.choices(imagenet_templates, k=config.logging.eval_num) templated_prompts = [template.format(prompt) for template in templates]
# ref: # - https://github.com/jmhessel/clipscore/blob/main/clipscore.py # - https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb def get_clip_preprocess(n_px=224): def Convert(image): return image.convert("RGB") image_preprocess = Compose( [ Resize(n_px, interpolation=Image.BICUBIC), CenterCrop(n_px), Convert, ToTensor(), Normalize( (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711), ), ] ) def text_preprocess(text): return clip.tokenize(text, truncate=True) return image_preprocess, text_preprocess @torch.no_grad() def clip_score( images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]], texts: str, w: float = 2.5, clip_model: str = "ViT-B/32", n_px: int = 224, cross_matching: bool = False, ): """ Compute CLIPScore (https://arxiv.org/abs/2104.08718) for generated images according to their prompts. *Important*: same as the official implementation, we take *SUM* of the similarity scores across all the reference texts. If you are evaluating on the Concept Erasing task, it might need to be modified to *MEAN*, or only one reference text should be given. Args: images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images. Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path. texts (str): A list of prompts. w (float, optional): The weight of the similarity score. Defaults to 2.5. clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32". n_px (int, optional): The size of images. Defaults to 224. cross_matching (bool, optional): Whether to compute the similarity between images and texts in a cross-matching manner. Returns: score (np.ndarray): The CLIPScore of generated images. size: (len(images), ) """ if isinstance(texts, str): texts = [texts] if not cross_matching: assert len(images) == len( texts ), "The length of images and texts should be the same if cross_matching is False." if isinstance(images[0], str): images = [Image.open(img) for img in images] elif isinstance(images[0], np.ndarray): images = [Image.fromarray(img) for img in images] elif isinstance(images[0], torch.Tensor): images = [Image.fromarray(img.cpu().numpy()) for img in images] else: assert isinstance(images[0], Image.Image), "Invalid image type." model, _ = clip.load(clip_model, device="cuda") image_preprocess, text_preprocess = get_clip_preprocess( n_px ) # following the official implementation, rather than using the default CLIP preprocess # extract all texts texts_feats = text_preprocess(texts).cuda() texts_feats = model.encode_text(texts_feats) # extract all images images_feats = [image_preprocess(img) for img in images] images_feats = torch.stack(images_feats, dim=0).cuda() images_feats = model.encode_image(images_feats) # compute the similarity images_feats = images_feats / images_feats.norm(dim=1, p=2, keepdim=True) texts_feats = texts_feats / texts_feats.norm(dim=1, p=2, keepdim=True) if cross_matching: score = w * images_feats @ texts_feats.T # TODO: the *SUM* here remains to be verified return score.sum(dim=1).clamp(min=0).cpu().numpy() else: score = w * images_feats * texts_feats return score.sum(dim=1).clamp(min=0).cpu().numpy() @torch.no_grad() def clip_accuracy( images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]], ablated_texts: Union[List[str], str], anchor_texts: Union[List[str], str], w: float = 2.5, clip_model: str = "ViT-B/32", n_px: int = 224, ): """ Compute CLIPAccuracy according to CLIPScore. Args: images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images. Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path. ablated_texts (Union[List[str], str]): A list of prompts that are ablated from the anchor texts. anchor_texts (Union[List[str], str]): A list of prompts that the ablated concepts fall back to. w (float, optional): The weight of the similarity score. Defaults to 2.5. clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32". n_px (int, optional): The size of images. Defaults to 224. Returns: accuracy (float): The CLIPAccuracy of generated images. size: (len(images), ) """ if isinstance(ablated_texts, str): ablated_texts = [ablated_texts] if isinstance(anchor_texts, str): anchor_texts = [anchor_texts] assert len(ablated_texts) == len( anchor_texts ), "The length of ablated_texts and anchor_texts should be the same." ablated_clip_score = clip_score(images, ablated_texts, w, clip_model, n_px) anchor_clip_score = clip_score(images, anchor_texts, w, clip_model, n_px) accuracy = np.mean(anchor_clip_score < ablated_clip_score).item() return accuracy def clip_eval_by_image( images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]], ablated_texts: Union[List[str], str], anchor_texts: Union[List[str], str], w: float = 2.5, clip_model: str = "ViT-B/32", n_px: int = 224, ): """ Compute CLIPScore and CLIPAccuracy with generated images. Args: images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images. Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path. ablated_texts (Union[List[str], str]): A list of prompts that are ablated from the anchor texts. anchor_texts (Union[List[str], str]): A list of prompts that the ablated concepts fall back to. w (float, optional): The weight of the similarity score. Defaults to 2.5. clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32". n_px (int, optional): The size of images. Defaults to 224. Returns: score (float): The CLIPScore of generated images. accuracy (float): The CLIPAccuracy of generated images. """ ablated_clip_score = clip_score(images, ablated_texts, w, clip_model, n_px) anchor_clip_score = clip_score(images, anchor_texts, w, clip_model, n_px) accuracy = np.mean(anchor_clip_score < ablated_clip_score).item() score = np.mean(ablated_clip_score).item() return score, accuracy def clip_eval( pipe: DiffusionPipeline, config: RootConfig, w: float = 2.5, clip_model: str = "ViT-B/32", n_px: int = 224, ): """ Compute CLIPScore and CLIPAccuracy. For each given prompt in config.logging.prompts, we: 1. sample config.logging.eval_num templates 2. generate images with the sampled templates 3. compute CLIPScore and CLIPAccuracy between each generated image and the *corresponding* template to get the final CLIPScore and CLIPAccuracy for each prompt. Args: pipe (DiffusionPipeline): The diffusion pipeline. config (RootConfig): The root config. w (float, optional): The weight of the similarity score. Defaults to 2.5. clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32". n_px (int, optional): The size of images. Defaults to 224. Returns: score (list[float]): The CLIPScore of each concept to evaluate. accuracy (list[float]): The CLIPAccuracy of each concept to evaluate. """ scores, accs = [], [] for prompt in config.logging.prompts: templates = random.choices(imagenet_templates, k=config.logging.eval_num) templated_prompts = [template.format(prompt) for template in templates]
samples = text2img(
0
2023-12-26 03:19:16+00:00
4k
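The `clip_score` function above reduces to w times the cosine similarity between image and text embeddings, clamped at zero and summed over the reference texts. A NumPy sketch of just that final scoring step, assuming the CLIP features have already been extracted:

    import numpy as np

    def clip_score_from_feats(img_feats, txt_feats, w=2.5):
        # normalize to unit length, take cosine similarity, scale by w,
        # clamp at zero, and (as in the official CLIPScore code) sum over texts
        img = img_feats / np.linalg.norm(img_feats, axis=1, keepdims=True)
        txt = txt_feats / np.linalg.norm(txt_feats, axis=1, keepdims=True)
        sim = w * img @ txt.T                     # (n_images, n_texts)
        return np.clip(sim, 0, None).sum(axis=1)  # one score per image

    scores = clip_score_from_feats(np.random.randn(4, 512), np.random.randn(2, 512))
    print(scores.shape)  # (4,)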
theOneAndOnlyOne/BeReel
main.py
[ { "identifier": "create_images", "path": "combineImages.py", "snippet": "def create_images():\n # Example usage\n primary_folder = os.path.join(os.getcwd(), \"primary\")\n secondary_folder = os.path.join(os.getcwd(), \"secondary\")\n output_folder = os.path.join(os.getcwd(), \"combined\")\n\n overlay_images(primary_folder, secondary_folder, output_folder)" }, { "identifier": "buildSlideshow", "path": "generateSlideshow.py", "snippet": "def buildSlideshow(mode = 'classic'):\n music = os.path.join(os.getcwd(), \"curr_song.wav\")\n print(\"loading music from \", music)\n audio_file = librosa.load(music)\n y, sr = audio_file\n tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)\n beat_times_raw = librosa.frames_to_time(beat_frames,sr=sr)\n beat_times = [float(value) for value in beat_times_raw]\n beat_times = convert_to_durations(beat_times)\n print(beat_times)\n\n input_folder = os.path.join(os.getcwd(), 'combined')\n output_file = \"static/slideshow_test.mp4\"\n\n create_slideshow(input_folder, output_file, music, beat_times, mode)" }, { "identifier": "butidRecap", "path": "recap.py", "snippet": "def butidRecap():\n image_folder = os.path.join(os.getcwd(), \"combined\")\n output_path = (\n os.getcwd() + os.path.sep + \"static\" + os.path.sep + \"slideshow_test.mp4\"\n )\n frames_per_second = 30\n total_duration = 30\n\n generate_video(image_folder, output_path, frames_per_second, total_duration)" } ]
import os import requests from flask import Flask, render_template, request, jsonify from combineImages import create_images from generateSlideshow import buildSlideshow from recap import butidRecap from datetime import datetime
2,143
# Convert the input strings to datetime objects start_date_object = datetime.strptime(start_date_str, "%Y-%m-%d") end_date_object = datetime.strptime(end_date_str, "%Y-%m-%d") # Iterate through the 'data' array and download images for item in data_array: image_url = item["primary"].get("url", "") secondary_image_url = item["secondary"].get("url", "") date = item["memoryDay"] date_object = datetime.strptime(date, "%Y-%m-%d") if image_url and start_date_object <= date_object <= end_date_object: # Extracting the image name from the URL image_name = date + "_" + image_url.split("/")[-1] # Downloading the image image_path = os.path.join(folder_name, image_name) with open(image_path, "wb") as img_file: img_response = requests.get(image_url) if img_response.status_code == 200: img_file.write(img_response.content) print(f"Downloaded {image_name} to {folder_name}") else: print(f"Failed to download {image_name}") if secondary_image_url and start_date_object <= date_object <= end_date_object: # Extracting the image name from the URL image_name = date + "_" + secondary_image_url.split("/")[-1] # Downloading the image image_path = os.path.join(secondary_folder_name, image_name) with open(image_path, "wb") as img_file: img_response = requests.get(secondary_image_url) if img_response.status_code == 200: img_file.write(img_response.content) print(f"Downloaded {image_name} to {secondary_folder_name}") else: print(f"Failed to download {image_name}") return "complete" # All images referenced in the 'primary' URLs should now be saved in the 'primary' folder # 'secondary' URLs saved in 'secondary', etc. # ------------------------------------------------------------------------------------------------------------------------- # Flask App Routing @app.route("/", methods=["GET", "POST"]) def index(): if request.method == "POST": phone_number = request.form["phone_number"] otp_session = send_code(phone_number) if otp_session != "n/a": return render_template("verify.html", otp_session=otp_session) return render_template( "index.html", message="Invalid phone number. Check formatting and please try again.", ) return render_template("index.html") @app.route("/verify", methods=["POST"]) def verify_code(): if request.method == "POST": user_code = request.form["verification_code"] otp_session = request.form["otp_session"] print("> verify_code otp_session: ", otp_session) tokenObj = verify(otp_session, user_code) if tokenObj != "n/a": return render_template("process.html", tokenObj=tokenObj) else: return render_template("failure.html") # return render_template('verify.html', tokenObj='n/a', message='Invalid verification code. Please try again.') return render_template("verify.html") @app.route("/process", methods=["POST"]) def process_data(): if request.method == "POST": start_date_range = request.form["start_date_range"] end_date_range = request.form["end_date_range"] wav_file = request.files["wav_file"] tokenObj = request.form["tokenObj"] mode = request.form.get("mode") print("> HTML Form Elements: ") print("start_date_range ", str(start_date_range)) print("end_date_range ", str(end_date_range)) print("wav_file ", str(wav_file)) print("mode", str(mode)) # Call get_memories function print("> downloading music file locally: ") try: # Save the uploaded WAV file locally upload_directory = os.getcwd() print("saving file to ", upload_directory) if not os.path.exists(upload_directory): os.makedirs(upload_directory) wav_file.save(os.path.join(upload_directory, "curr_song.wav")) except Exception as e: print(f"Error in processing data: {str(e)}") result = " " if not os.path.exists("primary") or not os.path.exists("secondary"): print("> downloading images locally") result = get_memories(tokenObj, start_date_range, end_date_range) if result != "n/a": # Execute the Python functions create_images() # process images and apply effects # do something with current page here
app = Flask(__name__, template_folder="templates") # Acquire Phone Number from User def send_code(phone): print("> Entered phone number is ", phone) # First Post to send out OTP session and code url_send_code = "https://berealapi.fly.dev/login/send-code" # IMPORTANT: Format must be +########## payload = {"phone": phone} print("-- Sending OTP Session Request --") response = requests.post(url_send_code, json=payload) otp_session = "n/a" if response.status_code == 201: print("> Request successful!") print("Response:", response.json()) response_json = response.json() if "data" in response_json and "otpSession" in response_json["data"]: otp_session = response_json["data"]["otpSession"] print("OTP Session:", otp_session) else: print("No 'otpSession' found in the response.") else: print("Request failed with status code:", response.status_code) print(response.json()) return otp_session # Verify Session using otp_session code and user-entered otp_code received from phone notification def verify(otp_session, otp_code): # print("please enter OTP code") # otp_code = input() print("> OTP: ", otp_code) # Second POST request to verify based on user input url_verify = "https://berealapi.fly.dev/login/verify" payload_verify = {"code": otp_code, "otpSession": otp_session} print("-- Sending Verify Request --") response_verify = requests.post(url_verify, json=payload_verify) tokenObj = "n/a" if response_verify.status_code == 201: print("> Verification request successful!") print("Response:", response_verify.json()) # Process the verification response if needed response_json = response_verify.json() if "data" in response_json and "token" in response_json["data"]: tokenObj = response_json["data"]["token"] print("tokenObj:", tokenObj) else: print("No 'tokenObj' found in the response.") exit() else: print( "> Verification request failed with status code:", response_verify.status_code, ) print(response_verify.json()) exit() return tokenObj # Fetch user memories. Skip to this stage if we already acquired a reusable token def get_memories(tokenObj, start_date_range, end_date_range): url_mem_feed = "https://berealapi.fly.dev/friends/mem-feed" headers = {"token": tokenObj} # Create a folder named 'primary' if it doesn't exist folder_name = "primary" if not os.path.exists(folder_name): os.makedirs(folder_name) # Create a folder named 'secondary' if it doesn't exist secondary_folder_name = "secondary" if not os.path.exists(secondary_folder_name): os.makedirs(secondary_folder_name) print("-- Sending Get Memories Request --") response_mem_feed = requests.get(url_mem_feed, headers=headers) data_array = [] if response_mem_feed.status_code == 200: print("> GET request successful!") # Process the response from mem-feed endpoint print("Response:", response_mem_feed.json()) print("we did it yay") response_data = response_mem_feed.json().get("data", {}) data_array = response_data.get("data", []) else: print("GET request failed with status code:", response_mem_feed.status_code) start_date_str = str(start_date_range) end_date_str = str(end_date_range) # Convert the input strings to datetime objects start_date_object = datetime.strptime(start_date_str, "%Y-%m-%d") end_date_object = datetime.strptime(end_date_str, "%Y-%m-%d") # Iterate through the 'data' array and download images for item in data_array: image_url = item["primary"].get("url", "") secondary_image_url = item["secondary"].get("url", "") date = item["memoryDay"] date_object = datetime.strptime(date, "%Y-%m-%d") if image_url and start_date_object <= date_object <= end_date_object: # Extracting the image name from the URL image_name = date + "_" + image_url.split("/")[-1] # Downloading the image image_path = os.path.join(folder_name, image_name) with open(image_path, "wb") as img_file: img_response = requests.get(image_url) if img_response.status_code == 200: img_file.write(img_response.content) print(f"Downloaded {image_name} to {folder_name}") else: print(f"Failed to download {image_name}") if secondary_image_url and start_date_object <= date_object <= end_date_object: # Extracting the image name from the URL image_name = date + "_" + secondary_image_url.split("/")[-1] # Downloading the image image_path = os.path.join(secondary_folder_name, image_name) with open(image_path, "wb") as img_file: img_response = requests.get(secondary_image_url) if img_response.status_code == 200: img_file.write(img_response.content) print(f"Downloaded {image_name} to {secondary_folder_name}") else: print(f"Failed to download {image_name}") return "complete" # All images referenced in the 'primary' URLs should now be saved in the 'primary' folder # 'secondary' URLs saved in 'secondary', etc. # ------------------------------------------------------------------------------------------------------------------------- # Flask App Routing @app.route("/", methods=["GET", "POST"]) def index(): if request.method == "POST": phone_number = request.form["phone_number"] otp_session = send_code(phone_number) if otp_session != "n/a": return render_template("verify.html", otp_session=otp_session) return render_template( "index.html", message="Invalid phone number. Check formatting and please try again.", ) return render_template("index.html") @app.route("/verify", methods=["POST"]) def verify_code(): if request.method == "POST": user_code = request.form["verification_code"] otp_session = request.form["otp_session"] print("> verify_code otp_session: ", otp_session) tokenObj = verify(otp_session, user_code) if tokenObj != "n/a": return render_template("process.html", tokenObj=tokenObj) else: return render_template("failure.html") # return render_template('verify.html', tokenObj='n/a', message='Invalid verification code. Please try again.') return render_template("verify.html") @app.route("/process", methods=["POST"]) def process_data(): if request.method == "POST": start_date_range = request.form["start_date_range"] end_date_range = request.form["end_date_range"] wav_file = request.files["wav_file"] tokenObj = request.form["tokenObj"] mode = request.form.get("mode") print("> HTML Form Elements: ") print("start_date_range ", str(start_date_range)) print("end_date_range ", str(end_date_range)) print("wav_file ", str(wav_file)) print("mode", str(mode)) # Call get_memories function print("> downloading music file locally: ") try: # Save the uploaded WAV file locally upload_directory = os.getcwd() print("saving file to ", upload_directory) if not os.path.exists(upload_directory): os.makedirs(upload_directory) wav_file.save(os.path.join(upload_directory, "curr_song.wav")) except Exception as e: print(f"Error in processing data: {str(e)}") result = " " if not os.path.exists("primary") or not os.path.exists("secondary"): print("> downloading images locally") result = get_memories(tokenObj, start_date_range, end_date_range) if result != "n/a": # Execute the Python functions create_images() # process images and apply effects # do something with current page here
buildSlideshow(mode) # assemble files and load audio
1
2023-12-25 20:55:01+00:00
4k
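The login flow in this record is a two-step OTP exchange against the berealapi.fly.dev proxy: /login/send-code returns an otpSession, and /login/verify trades it plus the texted code for a reusable token. A condensed sketch of the two calls, using the same endpoints and payloads as the record but with the logging stripped out:

    import requests

    BASE = "https://berealapi.fly.dev"

    def request_otp(phone: str) -> str | None:
        # step 1: ask the proxy API to text a code; keep the returned otpSession
        resp = requests.post(f"{BASE}/login/send-code", json={"phone": phone})
        if resp.status_code == 201:
            return resp.json()["data"]["otpSession"]
        return None

    def redeem_otp(otp_session: str, code: str) -> str | None:
        # step 2: trade the session plus the user's code for a reusable token
        resp = requests.post(f"{BASE}/login/verify",
                             json={"code": code, "otpSession": otp_session})
        if resp.status_code == 201:
            return resp.json()["data"]["token"]
        return None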
dakpinaroglu/Frame2seq
frame2seq/utils/design.py
[ { "identifier": "residue_constants", "path": "frame2seq/utils/residue_constants.py", "snippet": "def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str,\n mapping: Mapping[str, int],\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\nAA_TO_ID = {\n 'A': 0,\n 'C': 1,\n 'D': 2,\n 'E': 3,\n 'F': 4,\n 'G': 5,\n 'H': 6,\n 'I': 7,\n 'K': 8,\n 'L': 9,\n 'M': 10,\n 'N': 11,\n 'P': 12,\n 'Q': 13,\n 'R': 14,\n 'S': 15,\n 'T': 16,\n 'V': 17,\n 'W': 18,\n 'Y': 19,\n 'X': 20,\n}\nID_TO_AA = {\n 0: 'A',\n 1: 'C',\n 2: 'D',\n 3: 'E',\n 4: 'F',\n 5: 'G',\n 6: 'H',\n 7: 'I',\n 8: 'K',\n 9: 'L',\n 10: 'M',\n 11: 'N',\n 12: 'P',\n 13: 'Q',\n 14: 'R',\n 15: 'S',\n 16: 'T',\n 17: 'V',\n 18: 'W',\n 19: 'Y',\n 20: 'X',\n}\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()" }, { "identifier": "get_neg_pll", "path": "frame2seq/utils/util.py", "snippet": "def get_neg_pll(probs, seq):\n seq_probs = torch.gather(probs, 1, seq.unsqueeze(-1)).squeeze(-1)\n neg_pll = -1 * torch.log(seq_probs)\n avg_neg_pll = neg_pll.sum().item() / len(neg_pll)\n return neg_pll, avg_neg_pll" }, { "identifier": "get_inference_inputs", "path": "frame2seq/utils/pdb2input.py", "snippet": "def get_inference_inputs(pdb_file, chain_id):\n atom_positions, aatype, seq_mask = get_parsed_inputs(pdb_file, chain_id)\n seq_mask = seq_mask.unsqueeze(0)\n aatype = torch.from_numpy(aatype)\n aatype = aatype.unsqueeze(0)\n X = atom_positions\n X = X.unsqueeze(0)\n return seq_mask, aatype, X" }, { "identifier": "output_fasta", "path": "frame2seq/utils/pred2output.py", "snippet": "def output_fasta(preds, fasta_dir):\n \"\"\"\n Given predicted sequences, write to a fasta file.\n \"\"\"\n with open(f\"{fasta_dir}/seqs.fasta\", \"a\") as f:\n for sample_i in range(len(preds)):\n pdbid_i = preds[sample_i]['pdbid']\n chain_i = preds[sample_i]['chain']\n seq_i = preds[sample_i]['seq']\n recovery_i = preds[sample_i]['recovery']\n avg_neg_pll_i = preds[sample_i]['avg_neg_pll']\n temp_i = preds[sample_i]['temp']\n f.write(\n f\">pdbid={pdbid_i} chain_id={chain_i} recovery={recovery_i*100:.2f}% score={avg_neg_pll_i:.2f} temperature={temp_i}\\n\"\n )\n f.write(f\"{seq_i}\\n\")" }, { "identifier": "output_indiv_fasta", "path": "frame2seq/utils/pred2output.py", "snippet": "def output_indiv_fasta(model_outs, fasta_dir):\n \"\"\"\n Given a predicted sequence, write to a fasta file.\n \"\"\"\n pdbid = model_outs['pdbid']\n chain = model_outs['chain']\n sample = model_outs['sample']\n seq = model_outs['seq']\n recovery = model_outs['recovery']\n avg_neg_pll = model_outs['avg_neg_pll']\n temp = model_outs['temp']\n\n with open(f\"{fasta_dir}/{pdbid}_{chain}_seq{sample}.fasta\", \"w\") as f:\n f.write(\n f\">pdbid={pdbid} chain_id={chain} recovery={recovery*100:.2f}% score={avg_neg_pll:.2f} temperature={temp}\\n\"\n )\n f.write(f\"{seq}\\n\")" }, { "identifier": "output_indiv_csv", "path": "frame2seq/utils/pred2output.py", "snippet": "def output_indiv_csv(scores, csv_dir):\n \"\"\"\n Given per-residue negative pseudo-log-likelihoods, write to a csv file.\n \"\"\"\n pdbid = scores['pdbid']\n chain = scores['chain']\n sample = scores['sample']\n res_idx = scores['res_idx']\n neg_pll = scores['neg_pll']\n\n df = pd.DataFrame(\n list(zip(res_idx, neg_pll)),\n columns=['Residue index', 'Negative pseudo-log-likelihood'])\n df.to_csv(f\"{csv_dir}/{pdbid}_{chain}_seq{sample}.csv\", index=False)" } ]
import os import torch import numpy as np from tqdm import tqdm from frame2seq.utils import residue_constants from frame2seq.utils.util import get_neg_pll from frame2seq.utils.pdb2input import get_inference_inputs from frame2seq.utils.pred2output import output_fasta, output_indiv_fasta, output_indiv_csv
2,315
def design(self, pdb_file, chain_id, temperature, num_samples, omit_AA, fixed_positions, save_indiv_seqs, save_indiv_neg_pll, verbose): seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id) seq_mask = seq_mask.to(self.device) aatype = aatype.to(self.device) X = X.to(self.device) str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]] input_aatype_onehot = residue_constants.sequence_to_onehot( sequence=str_form, mapping=residue_constants.AA_TO_ID, ) input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float() input_aatype_onehot = input_aatype_onehot.unsqueeze(0) input_aatype_onehot = input_aatype_onehot.to(self.device) input_aatype_onehot = torch.zeros_like(input_aatype_onehot) input_aatype_onehot[:, :, 20] = 1 # all positions are masked (set to unknown) if fixed_positions is not None: for pos in fixed_positions: pos = pos - 1 # convert to 0-indexing input_aatype_onehot[:, pos, :] = 0 input_aatype_onehot[:, pos, aatype[0][ pos]] = 1 # fixed positions set to the input sequence model_outs, scores, preds = {}, {}, [] with torch.no_grad(): pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot) pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot) pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot) pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble if omit_AA is not None: for aa in omit_AA: pred_seq[:, :, residue_constants.AA_TO_ID[aa]] = -np.inf pred_seq = pred_seq / temperature pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1) pred_seq = pred_seq[seq_mask] sampled_seq = torch.multinomial(pred_seq, num_samples=num_samples, replacement=True) for sample in tqdm(range(num_samples)): sampled_seq_i = sampled_seq[:, sample] input_seq_i = aatype[seq_mask] # sequence from the input PDB file neg_pll, avg_neg_pll = get_neg_pll(pred_seq, sampled_seq_i) input_neg_pll, input_avg_neg_pll = get_neg_pll( pred_seq, input_seq_i ) # negative pseudo-log-likelihood of the input sequence recovery = torch.sum( sampled_seq_i == aatype[seq_mask]) / torch.sum(seq_mask) sampled_seq_i = [ residue_constants.ID_TO_AA[int(i)] for i in sampled_seq_i ] sampled_seq_i = "".join(sampled_seq_i) if verbose: print(f"Recovery : {recovery*100:.2f}%") print( f"Average negative pseudo-log-likelihood : {avg_neg_pll:.2f}" ) print(f"Sequence: {sampled_seq_i}") model_outs['pdbid'] = pdb_file.split('/')[-1].split('.')[0] model_outs['chain'] = chain_id model_outs['sample'] = sample model_outs['seq'] = sampled_seq_i model_outs['recovery'] = recovery model_outs['avg_neg_pll'] = avg_neg_pll model_outs['temp'] = temperature preds.append(model_outs) fasta_dir = os.path.join(self.save_dir, 'seqs') os.makedirs(fasta_dir, exist_ok=True) if save_indiv_seqs: # save per-sequence fasta files output_indiv_fasta(model_outs, fasta_dir) if save_indiv_neg_pll: # save per-residue negative pseudo-log-likelihoods scores['pdbid'] = pdb_file.split('/')[-1].split('.')[0] scores['chain'] = chain_id scores['sample'] = sample scores['res_idx'] = [i for i in range(len(sampled_seq_i))] scores['neg_pll'] = [ neg_pll[i].item() for i in range(len(sampled_seq_i)) ] csv_dir = os.path.join(self.save_dir, 'scores') os.makedirs(csv_dir, exist_ok=True) output_indiv_csv(scores, csv_dir)
def design(self, pdb_file, chain_id, temperature, num_samples, omit_AA, fixed_positions, save_indiv_seqs, save_indiv_neg_pll, verbose): seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id) seq_mask = seq_mask.to(self.device) aatype = aatype.to(self.device) X = X.to(self.device) str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]] input_aatype_onehot = residue_constants.sequence_to_onehot( sequence=str_form, mapping=residue_constants.AA_TO_ID, ) input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float() input_aatype_onehot = input_aatype_onehot.unsqueeze(0) input_aatype_onehot = input_aatype_onehot.to(self.device) input_aatype_onehot = torch.zeros_like(input_aatype_onehot) input_aatype_onehot[:, :, 20] = 1 # all positions are masked (set to unknown) if fixed_positions is not None: for pos in fixed_positions: pos = pos - 1 # convert to 0-indexing input_aatype_onehot[:, pos, :] = 0 input_aatype_onehot[:, pos, aatype[0][ pos]] = 1 # fixed positions set to the input sequence model_outs, scores, preds = {}, {}, [] with torch.no_grad(): pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot) pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot) pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot) pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble if omit_AA is not None: for aa in omit_AA: pred_seq[:, :, residue_constants.AA_TO_ID[aa]] = -np.inf pred_seq = pred_seq / temperature pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1) pred_seq = pred_seq[seq_mask] sampled_seq = torch.multinomial(pred_seq, num_samples=num_samples, replacement=True) for sample in tqdm(range(num_samples)): sampled_seq_i = sampled_seq[:, sample] input_seq_i = aatype[seq_mask] # sequence from the input PDB file neg_pll, avg_neg_pll = get_neg_pll(pred_seq, sampled_seq_i) input_neg_pll, input_avg_neg_pll = get_neg_pll( pred_seq, input_seq_i ) # negative pseudo-log-likelihood of the input sequence recovery = torch.sum( sampled_seq_i == aatype[seq_mask]) / torch.sum(seq_mask) sampled_seq_i = [ residue_constants.ID_TO_AA[int(i)] for i in sampled_seq_i ] sampled_seq_i = "".join(sampled_seq_i) if verbose: print(f"Recovery : {recovery*100:.2f}%") print( f"Average negative pseudo-log-likelihood : {avg_neg_pll:.2f}" ) print(f"Sequence: {sampled_seq_i}") model_outs['pdbid'] = pdb_file.split('/')[-1].split('.')[0] model_outs['chain'] = chain_id model_outs['sample'] = sample model_outs['seq'] = sampled_seq_i model_outs['recovery'] = recovery model_outs['avg_neg_pll'] = avg_neg_pll model_outs['temp'] = temperature preds.append(model_outs) fasta_dir = os.path.join(self.save_dir, 'seqs') os.makedirs(fasta_dir, exist_ok=True) if save_indiv_seqs: # save per-sequence fasta files output_indiv_fasta(model_outs, fasta_dir) if save_indiv_neg_pll: # save per-residue negative pseudo-log-likelihoods scores['pdbid'] = pdb_file.split('/')[-1].split('.')[0] scores['chain'] = chain_id scores['sample'] = sample scores['res_idx'] = [i for i in range(len(sampled_seq_i))] scores['neg_pll'] = [ neg_pll[i].item() for i in range(len(sampled_seq_i)) ] csv_dir = os.path.join(self.save_dir, 'scores') os.makedirs(csv_dir, exist_ok=True) output_indiv_csv(scores, csv_dir)
output_fasta(
3
2023-12-25 09:29:36+00:00
4k
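A minimal, self-contained sketch of the get_neg_pll scoring helper from the frame2seq record above; the toy tensors are invented for illustration (the real inputs are the ensembled per-residue probabilities):

import torch

# Toy inputs (assumed shapes: [length, vocab]); real values come from the model ensemble.
probs = torch.tensor([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1]])
seq = torch.tensor([0, 1])  # sampled residue ids

# Mirrors frame2seq.utils.util.get_neg_pll: gather each residue's probability,
# take -log, and average over the sequence.
seq_probs = torch.gather(probs, 1, seq.unsqueeze(-1)).squeeze(-1)
neg_pll = -1 * torch.log(seq_probs)
avg_neg_pll = neg_pll.sum().item() / len(neg_pll)
print(neg_pll, avg_neg_pll)  # a lower average score means a more confident design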
davep/oshit
oshit/app/widgets/hacker_news.py
[ { "identifier": "Article", "path": "oshit/hn/item/article.py", "snippet": "class Article(Item):\n \"\"\"Base class for all types of articles on HackerNews.\"\"\"\n\n descendants: int = 0\n \"\"\"The number of descendants of the article.\"\"\"\n\n score: int = 0\n \"\"\"The score of the article.\"\"\"\n\n title: str = \"\"\n \"\"\"The title of the article.\"\"\"\n\n def populate_with(self, data: dict[str, Any]) -> Self:\n \"\"\"Populate the item with the data from the given JSON value.\n\n Args:\n data: The data to populate from.\n\n Returns:\n Self\n \"\"\"\n self.descendants = data.get(\"descendants\", 0)\n self.score = data[\"score\"]\n self.title = data[\"title\"]\n return super().populate_with(data)" }, { "identifier": "load_configuration", "path": "oshit/app/data/config.py", "snippet": "@lru_cache(maxsize=None)\ndef load_configuration() -> Configuration:\n \"\"\"Load the configuration.\n\n Returns:\n The configuration.\n\n Note:\n As a side-effect, if the configuration doesn't exist a default one\n will be saved to storage.\n\n This function is designed so that it's safe and low-cost to\n repeatedly call it. The configuration is cached and will only be\n loaded from storage when necessary.\n \"\"\"\n source = configuration_file()\n return (\n Configuration(**loads(source.read_text(encoding=\"utf-8\")))\n if source.exists()\n else save_configuration(Configuration())\n )" }, { "identifier": "save_configuration", "path": "oshit/app/data/config.py", "snippet": "def save_configuration(configuration: Configuration) -> Configuration:\n \"\"\"Save the given configuration.\n\n Args:\n The configuration to store.\n\n Returns:\n The configuration.\n \"\"\"\n load_configuration.cache_clear()\n configuration_file().write_text(\n dumps(asdict(configuration), indent=4), encoding=\"utf-8\"\n )\n return load_configuration()" }, { "identifier": "Items", "path": "oshit/app/widgets/items.py", "snippet": "class Items(Generic[ArticleType], TabPane):\n \"\"\"The pane that displays the top stories.\"\"\"\n\n CONTEXT_HELP = \"\"\"\n ## View keys\n\n | Key | Description |\n | - | - |\n | <kbd>Ctrl</kbd>+<knd>r</kbd> | Reload. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Items OptionList {\n height: 1fr;\n border: none;\n padding: 0;\n background: $panel;\n\n &:focus {\n border: none;\n background: $panel;\n }\n }\n \"\"\"\n\n BINDINGS = [\n (\"ctrl+r\", \"reload\"),\n ]\n\n compact: var[bool] = var(True)\n \"\"\"Should we use a compact display?\"\"\"\n\n def __init__(\n self, title: str, key: str, source: Callable[[], Awaitable[list[ArticleType]]]\n ) -> None:\n \"\"\"Initialise the pane.\n\n Args:\n title: The title for the pane.\n key: The key used to switch to this pane.\n source: The source of items for the pane.\n \"\"\"\n super().__init__(f\"{title.capitalize()} [dim]\\\\[{key}][/]\", id=title)\n self._description = title\n \"\"\"The description of the pane.\"\"\"\n self._snarfed: datetime | None = None\n \"\"\"The time when the data was snarfed.\"\"\"\n self._source = source\n \"\"\"The source of items to show.\"\"\"\n self._items: list[ArticleType] = []\n \"\"\"The items to show.\"\"\"\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose the content of the pane.\"\"\"\n yield ArticleList()\n\n @property\n def description(self) -> str:\n \"\"\"The description for this pane.\"\"\"\n suffix = \"\"\n if self._snarfed is None:\n suffix = \" - Loading...\"\n elif not self._items:\n suffix = \" - Reloading...\"\n else:\n suffix = f\" - Updated {naturaltime(self._snarfed)}\"\n return f\"{self._description.capitalize()}{suffix}\"\n\n def _redisplay(self) -> None:\n \"\"\"Redisplay the items.\"\"\"\n display = self.query_one(OptionList)\n remember = display.highlighted\n display.clear_options().add_options(\n [HackerNewsArticle(item, self.compact) for item in self._items]\n )\n display.highlighted = remember\n\n @work\n async def _load(self) -> None:\n \"\"\"Load up the items and display them.\"\"\"\n display = self.query_one(OptionList)\n display.loading = True\n self._refresh_description()\n try:\n self._items = await self._source()\n except HN.RequestError as error:\n self.app.bell()\n self.notify(\n str(error),\n title=f\"Error loading items for '{self._description.capitalize()}'\",\n timeout=8,\n severity=\"error\",\n )\n else:\n self._snarfed = datetime.now()\n self._redisplay()\n display.loading = False\n self._refresh_description()\n\n def _refresh_description(self) -> None:\n \"\"\"Force a refresh of the description.\"\"\"\n # pylint:disable=attribute-defined-outside-init\n self.screen.sub_title = self.description\n\n @property\n def loaded(self) -> bool:\n \"\"\"Has this tab loaded its items?\"\"\"\n return bool(self._items)\n\n def on_show(self) -> None:\n \"\"\"Handle being shown.\"\"\"\n if not self.loaded:\n self._load()\n\n def steal_focus(self) -> None:\n \"\"\"Steal focus for the item list within.\"\"\"\n self.query_one(OptionList).focus()\n\n def _watch_compact(self) -> None:\n \"\"\"React to the compact setting being changed.\"\"\"\n if self.loaded:\n self._redisplay()\n\n @on(OptionList.OptionSelected)\n def visit(self, event: OptionList.OptionSelected) -> None:\n \"\"\"Handle an option list item being selected.\"\"\"\n assert isinstance(option := event.option, HackerNewsArticle)\n open_url(option.article.visitable_url)\n\n def action_reload(self) -> None:\n \"\"\"Reload the items\"\"\"\n self._items = []\n self._load()" } ]
from textual import on from textual.reactive import var from textual.widgets import TabbedContent, Tabs from ...hn.item import Article from ..data import load_configuration, save_configuration from .items import Items
1,696
"""Widget that displays the HackerNews content.""" ############################################################################## # Textual imports. ############################################################################## # Local imports. ############################################################################## class HackerNews(TabbedContent): """The HackerNews content.""" BINDINGS = [ ("escape", "escape"), ("down, enter", "pane"), ("left", "previous"), ("right", "next"), ] compact: var[bool] = var(True) """Should we use a compact or relaxed display?""" def on_mount(self) -> None: """Configure the widget once the DOM is ready.""" self.compact = load_configuration().compact_mode @property
"""Widget that displays the HackerNews content.""" ############################################################################## # Textual imports. ############################################################################## # Local imports. ############################################################################## class HackerNews(TabbedContent): """The HackerNews content.""" BINDINGS = [ ("escape", "escape"), ("down, enter", "pane"), ("left", "previous"), ("right", "next"), ] compact: var[bool] = var(True) """Should we use a compact or relaxed display?""" def on_mount(self) -> None: """Configure the widget once the DOM is ready.""" self.compact = load_configuration().compact_mode @property
def active_items(self) -> Items[Article]:
0
2023-12-25 14:06:07+00:00
4k
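One detail worth noting from the configuration snippets in the record above: save_configuration clears the lru_cache on load_configuration before writing, so a load after a save always reflects disk. A hedged sketch of that round-trip; it assumes Configuration is a dataclass (implied by asdict in save_configuration) with a compact_mode field (implied by the widget's on_mount):

from dataclasses import replace

from oshit.app.data.config import load_configuration, save_configuration

config = load_configuration()  # first call reads disk (or saves a default), then caches
updated = save_configuration(replace(config, compact_mode=False))  # cache_clear + write + reload
assert load_configuration().compact_mode is False  # the cached copy is the freshly saved one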
Maximilian-Winter/llama-cpp-agent
src/llama_cpp_agent/llm_agent.py
[ { "identifier": "LlamaLLMSettings", "path": "src/llama_cpp_agent/llm_settings.py", "snippet": "class LlamaLLMSettings:\n model_path: str\n n_gpu_layers: int = 0\n f16_kv: bool = True\n offload_kqv: bool = True\n use_mlock: bool = False\n embedding: bool = False\n n_threads: int = None\n n_batch: int = 512\n n_ctx: int = 512\n last_n_tokens_size: int = 64\n verbose: bool = False\n seed: int = -1\n\n def save(self, file_path: str):\n with open(file_path, 'w', encoding=\"utf-8\") as file:\n json.dump(self.as_dict(), file, indent=4)\n\n @staticmethod\n def load_from_file(file_path: str) -> \"LlamaLLMSettings\":\n with open(file_path, 'r', encoding=\"utf-8\") as file:\n loaded_settings = json.load(file)\n return LlamaLLMSettings(**loaded_settings)\n\n @staticmethod\n def load_from_dict(settings: dict) -> \"LlamaLLMSettings\":\n return LlamaLLMSettings(**settings)\n\n def as_dict(self) -> dict:\n return self.__dict__" }, { "identifier": "MessagesFormatterType", "path": "src/llama_cpp_agent/messages_formatter.py", "snippet": "class MessagesFormatterType(Enum):\n MIXTRAL = 1\n CHATML = 2\n VICUNA = 3\n LLAMA_2 = 4\n SYNTHIA = 5\n NEURAL_CHAT = 6\n SOLAR = 7\n OPEN_CHAT = 8" }, { "identifier": "get_predefined_messages_formatter", "path": "src/llama_cpp_agent/messages_formatter.py", "snippet": "def get_predefined_messages_formatter(formatter_type: MessagesFormatterType) -> MessagesFormatter:\n return predefined_formatter[formatter_type]" }, { "identifier": "MessagesFormatter", "path": "src/llama_cpp_agent/messages_formatter.py", "snippet": "class MessagesFormatter:\n def __init__(self, PRE_PROMPT: str, SYS_PROMPT_START: str, SYS_PROMPT_END: str, USER_PROMPT_START: str,\n USER_PROMPT_END: str,\n ASSISTANT_PROMPT_START: str,\n ASSISTANT_PROMPT_END: str,\n INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE: bool,\n DEFAULT_STOP_SEQUENCES: List[str],\n USE_USER_ROLE_FUNCTION_CALL_RESULT: bool = True,\n FUNCTION_PROMPT_START: str = \"\",\n FUNCTION_PROMPT_END: str = \"\"):\n self.PRE_PROMPT = PRE_PROMPT\n self.SYS_PROMPT_START = SYS_PROMPT_START\n self.SYS_PROMPT_END = SYS_PROMPT_END\n self.USER_PROMPT_START = USER_PROMPT_START\n self.USER_PROMPT_END = USER_PROMPT_END\n self.ASSISTANT_PROMPT_START = ASSISTANT_PROMPT_START\n self.ASSISTANT_PROMPT_END = ASSISTANT_PROMPT_END\n self.INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE = INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE\n self.DEFAULT_STOP_SEQUENCES = DEFAULT_STOP_SEQUENCES\n self.FUNCTION_PROMPT_START = FUNCTION_PROMPT_START\n self.FUNCTION_PROMPT_END = FUNCTION_PROMPT_END\n self.USE_USER_ROLE_FUNCTION_CALL_RESULT = USE_USER_ROLE_FUNCTION_CALL_RESULT\n\n def format_messages(self, messages: List[Dict[str, str]]) -> Tuple[str, str]:\n formatted_messages = self.PRE_PROMPT\n last_role = \"assistant\"\n no_user_prompt_start = False\n for message in messages:\n if message[\"role\"] == \"system\":\n formatted_messages += self.SYS_PROMPT_START + message[\"content\"] + self.SYS_PROMPT_END\n last_role = \"system\"\n if self.INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE:\n formatted_messages = self.USER_PROMPT_START + formatted_messages\n no_user_prompt_start = True\n elif message[\"role\"] == \"user\":\n if no_user_prompt_start:\n no_user_prompt_start = False\n formatted_messages += message[\"content\"] + self.USER_PROMPT_END\n else:\n formatted_messages += self.USER_PROMPT_START + message[\"content\"] + self.USER_PROMPT_END\n last_role = \"user\"\n elif message[\"role\"] == \"assistant\":\n formatted_messages += self.ASSISTANT_PROMPT_START + message[\"content\"] + 
self.ASSISTANT_PROMPT_END\n last_role = \"assistant\"\n elif message[\"role\"] == \"function\":\n if self.USE_USER_ROLE_FUNCTION_CALL_RESULT:\n formatted_messages += self.USER_PROMPT_START + message[\"content\"] + self.USER_PROMPT_END\n last_role = \"user\"\n else:\n formatted_messages += self.FUNCTION_PROMPT_START + message[\"content\"] + self.FUNCTION_PROMPT_END\n last_role = \"function\"\n if last_role == \"system\" or last_role == \"user\":\n return formatted_messages + self.ASSISTANT_PROMPT_START.strip(), \"assistant\"\n return formatted_messages + self.USER_PROMPT_START.strip(), \"user\"\n\n def save(self, file_path: str):\n with open(file_path, 'w', encoding=\"utf-8\") as file:\n json.dump(self.as_dict(), file, indent=4)\n\n @staticmethod\n def load_from_file(file_path: str) -> \"MessagesFormatter\":\n with open(file_path, 'r', encoding=\"utf-8\") as file:\n loaded_messages_formatter = json.load(file)\n return MessagesFormatter(**loaded_messages_formatter)\n\n @staticmethod\n def load_from_dict(loaded_messages_formatter: dict) -> \"MessagesFormatter\":\n return MessagesFormatter(**loaded_messages_formatter)\n\n def as_dict(self) -> dict:\n return self.__dict__" }, { "identifier": "LlamaCppFunctionTool", "path": "src/llama_cpp_agent/function_calling.py", "snippet": "class LlamaCppFunctionTool:\n def __init__(self, pydantic_model: Type[BaseModel], has_markdown_code_block=False, has_triple_quoted_string=False,\n **additional_parameters):\n self.model = pydantic_model\n self.look_for_field_string = has_markdown_code_block or has_triple_quoted_string\n self.has_markdown_code_block = has_markdown_code_block\n self.has_triple_quoted_string = has_triple_quoted_string\n self.additional_parameters = additional_parameters if additional_parameters else {}\n\n def __call__(self, *args, **kwargs):\n return self.model(**kwargs)" }, { "identifier": "LlamaCppFunctionToolRegistry", "path": "src/llama_cpp_agent/function_calling.py", "snippet": "class LlamaCppFunctionToolRegistry:\n def __init__(self):\n self.tool_root = \"function\"\n self.tool_rule_content = \"function-parameters\"\n self.model_prefix = \"Function\"\n self.fields_prefix = \"Function Parameters\"\n self.function_tools = {}\n self.function_tools_containing_field_string = {}\n self.grammar = None\n self.grammar_documentation = None\n self.gbnf_grammar = None\n\n def register_function_tool(self, function_tool: LlamaCppFunctionTool):\n function_name = format_model_and_field_name(function_tool.model.__name__)\n if function_tool.look_for_field_string:\n self.function_tools_containing_field_string[function_name] = function_tool\n else:\n self.function_tools[function_name] = function_tool\n\n def get_function_tool(self, function_name: str):\n if function_name in self.function_tools:\n return self.function_tools[function_name]\n elif function_name in self.function_tools_containing_field_string:\n return self.function_tools_containing_field_string[function_name]\n else:\n return None\n\n def finalize(self):\n pydantic_function_models = []\n look_markdown_code_block = False\n for function_tool in self.function_tools.values():\n pydantic_function_models.append(function_tool.model)\n if function_tool.look_for_field_string:\n look_markdown_code_block = True\n for function_tool in self.function_tools_containing_field_string.values():\n pydantic_function_models.append(function_tool.model)\n if function_tool.look_for_field_string:\n look_markdown_code_block = True\n gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(\n 
pydantic_function_models, look_markdown_code_block, look_markdown_code_block, self.tool_root,\n self.tool_rule_content, self.model_prefix,\n self.fields_prefix)\n\n self.grammar = LlamaGrammar.from_string(gbnf_grammar, verbose=False)\n self.grammar_documentation = documentation\n self.gbnf_grammar = gbnf_grammar\n\n def get_grammar(self):\n return self.grammar\n\n def get_documentation(self):\n return self.grammar_documentation\n\n def handle_function_call(self, function_call_response: str):\n try:\n for name, tool in self.function_tools_containing_field_string.items():\n\n if name in function_call_response:\n function_call, content = parse_json_response_with_markdown_code_block(function_call_response)\n if self.function_tools_containing_field_string[function_call[self.tool_root]].has_markdown_code_block:\n function_call[self.tool_rule_content][\"markdown_code_block\"] = content\n elif self.function_tools_containing_field_string[function_call[self.tool_root]].has_triple_quoted_string:\n function_call[self.tool_rule_content][\"triple_quoted_string\"] = content\n\n output = self.intern_function_call(function_call, with_markdown_code_block=True)\n return output\n\n function_call = parse_json_response(function_call_response)\n output = self.intern_function_call(function_call)\n return output\n\n except AttributeError as e:\n return f\"Error: {e}\"\n\n def intern_function_call(self, function_call: dict, with_markdown_code_block=False):\n if with_markdown_code_block:\n function_tool = self.function_tools_containing_field_string[function_call[self.tool_root]]\n else:\n function_tool = self.function_tools[function_call[self.tool_root]]\n try:\n cls = function_tool.model\n call_parameters = function_call[self.tool_rule_content]\n call = cls(**call_parameters)\n output = call.run(**function_tool.additional_parameters)\n return output\n except AttributeError as e:\n return f\"Error: {e}\"" } ]
import json from dataclasses import dataclass from typing import List, Dict, Literal, Callable, Union from llama_cpp import Llama, LlamaGrammar from .llm_settings import LlamaLLMSettings from .messages_formatter import MessagesFormatterType, get_predefined_messages_formatter, MessagesFormatter from .function_calling import LlamaCppFunctionTool, LlamaCppFunctionToolRegistry
2,719
@dataclass class StreamingResponse: text: str is_last_response: bool class LlamaCppAgent: """ A base agent that can be used for chat, structured output and function calling. Is used as part of all other agents. """ def __init__(self, model: Union[Llama, LlamaLLMSettings], name: str = "llamacpp_agent", system_prompt: str = "You are a helpful assistant.", predefined_messages_formatter_type: MessagesFormatterType = MessagesFormatterType.CHATML, custom_messages_formatter: MessagesFormatter = None, debug_output: bool = False): if isinstance(model, LlamaLLMSettings): model = Llama(**model.as_dict()) self.model = model self.name = name self.system_prompt = system_prompt self.debug_output = debug_output self.messages = [] if custom_messages_formatter is not None: self.messages_formatter = custom_messages_formatter else: self.messages_formatter = get_predefined_messages_formatter(predefined_messages_formatter_type) @staticmethod def get_function_tool_registry(function_tool_list: List[LlamaCppFunctionTool]):
@dataclass class StreamingResponse: text: str is_last_response: bool class LlamaCppAgent: """ A base agent that can be used for chat, structured output and function calling. Is used as part of all other agents. """ def __init__(self, model: Union[Llama, LlamaLLMSettings], name: str = "llamacpp_agent", system_prompt: str = "You are a helpful assistant.", predefined_messages_formatter_type: MessagesFormatterType = MessagesFormatterType.CHATML, custom_messages_formatter: MessagesFormatter = None, debug_output: bool = False): if isinstance(model, LlamaLLMSettings): model = Llama(**model.as_dict()) self.model = model self.name = name self.system_prompt = system_prompt self.debug_output = debug_output self.messages = [] if custom_messages_formatter is not None: self.messages_formatter = custom_messages_formatter else: self.messages_formatter = get_predefined_messages_formatter(predefined_messages_formatter_type) @staticmethod def get_function_tool_registry(function_tool_list: List[LlamaCppFunctionTool]):
function_tool_registry = LlamaCppFunctionToolRegistry()
5
2023-12-29 16:54:39+00:00
4k
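The gold continuation for this record begins function_tool_registry = LlamaCppFunctionToolRegistry(); based on the registry snippet in the context, the rest of the static method plausibly registers each tool and finalizes the grammar. A hedged sketch, where the Echo pydantic model is invented purely for illustration:

from pydantic import BaseModel

from llama_cpp_agent.function_calling import LlamaCppFunctionTool, LlamaCppFunctionToolRegistry

class Echo(BaseModel):
    """Hypothetical tool model: run() just returns its own input."""
    text: str
    def run(self, **kwargs):
        return self.text

function_tool_registry = LlamaCppFunctionToolRegistry()
for function_tool in [LlamaCppFunctionTool(Echo)]:
    function_tool_registry.register_function_tool(function_tool)
function_tool_registry.finalize()  # builds the GBNF grammar and documentation for all registered tools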
usail-hkust/LLMTSCS
run_presslight.py
[ { "identifier": "config", "path": "utils/config.py", "snippet": "DIC_AGENTS = {\n \"Random\": RandomAgent,\n \"Fixedtime\": FixedtimeAgent,\n \"MaxPressure\": MaxPressureAgent,\n \"EfficientMaxPressure\": EfficientMaxPressureAgent,\n \"AdvancedMaxPressure\": AdvancedMaxPressureAgent,\n\n \"EfficientPressLight\": PressLightAgentOne,\n \"EfficientColight\": CoLightAgent,\n \"EfficientMPLight\": MPLightAgent,\n \"MPLight\": MPLightAgent,\n \"Colight\": CoLightAgent,\n\n \"AdvancedMPLight\": AdvancedMPLightAgent,\n \"AdvancedColight\": CoLightAgent,\n \"AdvancedDQN\": SimpleDQNAgentOne,\n \"Attend\": AttendLightAgent,\n \"ChatGPTTLCSWaitTimeForecast\": ChatGPTTLCS_Wait_Time_Forecast,\n \"ChatGPTTLCSCommonsense\": ChatGPTTLCS_Commonsense,\n \"ChatGPTTLCSCommonsenseFlowCoordination\": ChatGPTTLCS_Commonsense_Flow_Coordination,\n \"ChatGPTTLCSWaitTimeForecastCode\": ChatGPTTLCS_Wait_Time_Forecast_Code,\n \"ChatGPTTLCSCommonsenseCode\": ChatGPTTLCS_Commonsense_Code,\n \"ChatGPTTLCSCommonsenseFlowCoordinationCode\": ChatGPTTLCS_Commonsense_Flow_Coordination_Code,\n \"ChatGPTTLCSZeroKnowledge\": ChatGPTTLCS_Zero_Knowledge,\n \"ChatGPTTLCSZeroKnowledgeCode\": ChatGPTTLCS_Zero_Knowledge_Code,\n \"LLMTLCSWaitTimeForecast\": LLM_TLCS_Wait_Time_Forecast,\n \"LLMTLCSCommonsense\": LLM_TLCS_Commonsense,\n}\nDIC_PATH = {\n \"PATH_TO_MODEL\": \"model/default\",\n \"PATH_TO_WORK_DIRECTORY\": \"records/default\",\n \"PATH_TO_DATA\": \"data/template\",\n \"PATH_TO_PRETRAIN_MODEL\": \"model/default\",\n \"PATH_TO_ERROR\": \"errors/default\",\n}\nDIC_BASE_AGENT_CONF = {\n \"D_DENSE\": 20,\n \"LEARNING_RATE\": 0.001,\n \"PATIENCE\": 10,\n \"BATCH_SIZE\": 20,\n \"EPOCHS\": 100,\n \"SAMPLE_SIZE\": 3000,\n \"MAX_MEMORY_LEN\": 12000,\n\n \"UPDATE_Q_BAR_FREQ\": 5,\n \"UPDATE_Q_BAR_EVERY_C_ROUND\": False,\n\n \"GAMMA\": 0.8,\n \"NORMAL_FACTOR\": 20,\n\n \"EPSILON\": 0.8,\n \"EPSILON_DECAY\": 0.95,\n \"MIN_EPSILON\": 0.2,\n \"LOSS_FUNCTION\": \"mean_squared_error\",\n}\nDIC_CHATGPT_AGENT_CONF = {\n \"GPT_VERSION\": \"gpt-4\",\n \"LOG_DIR\": \"../GPT_logs\"\n}\nDIC_FIXEDTIME_AGENT_CONF = {\n \"FIXED_TIME\": [30, 30, 30, 30]\n}\nDIC_MAXPRESSURE_AGENT_CONF = {\n \"FIXED_TIME\": [30, 30, 30, 30]\n}" }, { "identifier": "error", "path": "utils/error.py", "snippet": "class flowFileException(Exception):\n def __init__(self, message):\n def __str__(self):" }, { "identifier": "pipeline_wrapper", "path": "utils/utils.py", "snippet": "def pipeline_wrapper(dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow):\n results_table = []\n all_rewards = []\n all_queue_len = []\n all_travel_time = []\n for i in range(5):\n dic_path[\"PATH_TO_MODEL\"] = (dic_path[\"PATH_TO_MODEL\"].split(\".\")[0] + \".json\" +\n time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))\n dic_path[\"PATH_TO_WORK_DIRECTORY\"] = (dic_path[\"PATH_TO_WORK_DIRECTORY\"].split(\".\")[0] + \".json\" +\n time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))\n ppl = Pipeline(dic_agent_conf=dic_agent_conf,\n dic_traffic_env_conf=dic_traffic_env_conf,\n dic_path=dic_path,\n roadnet=roadnet,\n trafficflow=trafficflow)\n round_results = ppl.run(round=i, multi_process=False)\n results_table.append([round_results['test_reward_over'], round_results['test_avg_queue_len_over'], round_results['test_avg_travel_time_over']])\n all_rewards.append(round_results['test_reward_over'])\n all_queue_len.append(round_results['test_avg_queue_len_over'])\n all_travel_time.append(round_results['test_avg_travel_time_over'])\n\n # delete junk\n cmd_delete_model = 
'find <dir> -type f ! -name \"round_<round>_inter_*.h5\" -exec rm -rf {} \\;'.replace(\"<dir>\", dic_path[\"PATH_TO_MODEL\"]).replace(\"<round>\", str(int(dic_traffic_env_conf[\"NUM_ROUNDS\"] - 1)))\n cmd_delete_work = 'find <dir> -type f ! -name \"state_action.json\" -exec rm -rf {} \\;'.replace(\"<dir>\", dic_path[\"PATH_TO_WORK_DIRECTORY\"])\n os.system(cmd_delete_model)\n os.system(cmd_delete_work)\n\n results_table.append([np.average(all_rewards), np.average(all_queue_len), np.average(all_travel_time)])\n results_table.append([np.std(all_rewards), np.std(all_queue_len), np.std(all_travel_time)])\n\n table_logger = wandb.init(\n project=dic_traffic_env_conf['PROJECT_NAME'],\n group=f\"{dic_traffic_env_conf['MODEL']}-{roadnet}-{trafficflow}-{len(dic_traffic_env_conf['PHASE'])}_Phases\",\n name=\"exp_results\",\n config=merge(merge(dic_agent_conf, dic_path), dic_traffic_env_conf),\n )\n columns = [\"reward\", \"avg_queue_len\", \"avg_travel_time\"]\n logger_table = wandb.Table(columns=columns, data=results_table)\n table_logger.log({\"results\": logger_table})\n wandb.finish()\n\n print(\"pipeline_wrapper end\")\n return" }, { "identifier": "merge", "path": "utils/utils.py", "snippet": "def merge(dic_tmp, dic_to_change):\n dic_result = copy.deepcopy(dic_tmp)\n dic_result.update(dic_to_change)\n return dic_result" } ]
from utils import config, error from utils.utils import pipeline_wrapper, merge from multiprocessing import Process import time import argparse import os
2,049
def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--memo", type=str, default='PressLight') parser.add_argument("--mod", type=str, default="EfficientPressLight") # EPressLight parser.add_argument("--model", type=str, default="PressLight") parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS") parser.add_argument("--eightphase", action="store_true", default=False) parser.add_argument("--gen", type=int, default=1) parser.add_argument("--multi_process", action="store_true", default=True) parser.add_argument("--workers", type=int, default=1) parser.add_argument("--dataset", type=str, default="jinan") parser.add_argument("--traffic_file", type=str, default="anon_3_4_jinan_real.json") parser.add_argument("--duration", type=int, default=30) parser.add_argument("--num_rounds", type=int, default=100) return parser.parse_args() def main(in_args=None): traffic_file_list = [] if in_args.dataset == 'jinan': count = 3600 road_net = "3_4" traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"] num_rounds = in_args.num_rounds template = "Jinan" elif in_args.dataset == 'hangzhou': count = 3600 road_net = "4_4" traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"] num_rounds = in_args.num_rounds template = "Hangzhou" elif in_args.dataset == 'newyork_16x3': count = 3600 road_net = "16_3" traffic_file_list = ["anon_16_3_newyork_real.json"] num_rounds = 80 template = "NewYork" elif in_args.dataset == 'newyork_28x7': count = 3600 road_net = "28_7" traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"] num_rounds = 80 template = "NewYork" # flow_file error try: if in_args.traffic_file not in traffic_file_list:
def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--memo", type=str, default='PressLight') parser.add_argument("--mod", type=str, default="EfficientPressLight") # EPressLight parser.add_argument("--model", type=str, default="PressLight") parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS") parser.add_argument("--eightphase", action="store_true", default=False) parser.add_argument("--gen", type=int, default=1) parser.add_argument("--multi_process", action="store_true", default=True) parser.add_argument("--workers", type=int, default=1) parser.add_argument("--dataset", type=str, default="jinan") parser.add_argument("--traffic_file", type=str, default="anon_3_4_jinan_real.json") parser.add_argument("--duration", type=int, default=30) parser.add_argument("--num_rounds", type=int, default=100) return parser.parse_args() def main(in_args=None): traffic_file_list = [] if in_args.dataset == 'jinan': count = 3600 road_net = "3_4" traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"] num_rounds = in_args.num_rounds template = "Jinan" elif in_args.dataset == 'hangzhou': count = 3600 road_net = "4_4" traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"] num_rounds = in_args.num_rounds template = "Hangzhou" elif in_args.dataset == 'newyork_16x3': count = 3600 road_net = "16_3" traffic_file_list = ["anon_16_3_newyork_real.json"] num_rounds = 80 template = "NewYork" elif in_args.dataset == 'newyork_28x7': count = 3600 road_net = "28_7" traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"] num_rounds = 80 template = "NewYork" # flow_file error try: if in_args.traffic_file not in traffic_file_list:
raise error.flowFileException('Flow file does not exist.')
1
2023-12-26 08:31:47+00:00
4k
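The merge utility from this record's context is simple enough to show standalone: it returns a deep copy of the base dict updated with the overrides, so neither input is mutated.

import copy

def merge(dic_tmp, dic_to_change):
    dic_result = copy.deepcopy(dic_tmp)
    dic_result.update(dic_to_change)
    return dic_result

base_conf = {"GAMMA": 0.8, "EPSILON": 0.8}
run_conf = merge(base_conf, {"EPSILON": 0.2})
print(run_conf)   # {'GAMMA': 0.8, 'EPSILON': 0.2}
print(base_conf)  # {'GAMMA': 0.8, 'EPSILON': 0.8} -- unchanged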
alipay/private_llm
demo/edge_device.py
[ { "identifier": "PLLlamaConfig", "path": "demo/model.py", "snippet": "class PLLlamaConfig(LlamaConfig):\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=0.000001,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_theta=10000,\n rope_scaling=None,\n attention_bias=False,\n rcd=128,\n rdc=128,\n cloud_ip=\"127.0.0.1\",\n cloud_port=12345,\n layers_to_transform=None,\n **kwargs,\n ):\n super().__init__(\n vocab_size,\n hidden_size,\n intermediate_size,\n num_hidden_layers,\n num_attention_heads,\n num_key_value_heads,\n hidden_act,\n max_position_embeddings,\n initializer_range,\n rms_norm_eps,\n use_cache,\n pad_token_id,\n bos_token_id,\n eos_token_id,\n pretraining_tp,\n tie_word_embeddings,\n rope_theta,\n rope_scaling,\n attention_bias,\n **kwargs,\n )\n self.rcd = rcd\n self.rdc = rdc\n self.cloud_ip = cloud_ip\n self.cloud_port = cloud_port\n self.layers_to_transform = layers_to_transform" }, { "identifier": "LlamaForDevice", "path": "demo/model.py", "snippet": "class LlamaForDevice(PLLlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config: PLLlamaConfig):\n super().__init__(config)\n self.padding_idx = config.pad_token_id\n\n self.embed_tokens = nn.Embedding(\n config.vocab_size, config.hidden_size, self.padding_idx\n )\n from pl_lib import PLMStack\n\n if config.layers_to_transform is None:\n num_layers = config.num_hidden_layers\n else:\n num_layers = len(config.layers_to_transform)\n\n self.lora_M_stack = PLMStack(num_layers, config.rcd, config.rdc)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.new_gens = []\n self.tokenizer = None\n # Initialize weights and apply final processing\n self.post_init()\n\n def set_tokenizer(self, tokenizer):\n \"\"\"set tokenizer for stream\n\n Args:\n tokenizer (_type_): _description_\n \"\"\"\n self.tokenizer = tokenizer\n\n def get_output_embeddings(self):\n return self.lm_head\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n )\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n s: socket.socket = None,\n comm_profiler: CommProfiler = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n if s is None:\n print(\"no socket provided to s\")\n return\n\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n inputs_embeds = self.embed_tokens(input_ids)\n send_tensor(s, inputs_embeds, profiler=comm_profiler) # initial communication\n\n self.lora_M_stack(s, profiler=comm_profiler)\n\n hidden_states = recv_tensor(s, buffer_size=int(4096e5)) # final comm\n if self.prefill_end is None:\n # time it for throughput analysis\n self.prefill_end = time.time()\n s.sendall(\"hi\".encode())\n past_kv_shape = recv_tensor(s, buffer_size=1024)\n # hidden_states = outputs[0] # og\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(\n self.vocab_size // self.config.pretraining_tp, dim=0\n )\n logits = [\n F.linear(hidden_states, lm_head_slices[i])\n for i in range(self.config.pretraining_tp)\n ]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n # stream output\n if self.tokenizer is not None:\n token_id = torch.argmax(logits, -1).item()\n old_full = self.tokenizer.decode(self.new_gens)\n self.new_gens.append(token_id)\n new_full = self.tokenizer.decode(self.new_gens)\n print(new_full[len(old_full) :], end=\"\", flush=True)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n bs, h, seq, hi = past_kv_shape.tolist()\n past_key_values = [[torch.zeros((int(bs), int(h), int(seq + 1), int(hi)))]]\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=past_key_values, # mock on edge side\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs,\n ):\n if past_key_values is not None:\n past_length = past_key_values[0][0].shape[2]\n\n # Some generation methods already pass only the last input ID\n if input_ids.shape[1] > past_length:\n remove_prefix_length = past_length\n else:\n # Default to old behavior: 
keep only final ID\n remove_prefix_length = input_ids.shape[1] - 1\n\n input_ids = input_ids[:, remove_prefix_length:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n s = kwargs.get(\"s\", None)\n comm_profiler = kwargs.get(\"comm_profiler\", None)\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n \"s\": s,\n \"comm_profiler\": comm_profiler,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(\n past_state.index_select(0, beam_idx.to(past_state.device))\n for past_state in layer_past\n ),\n )\n return reordered_past\n\n def my_generate(self, *args, **kwargs):\n \"\"\"simple wrapper to tell cloud when a new query comes in\"\"\"\n s = kwargs.get(\"s\", None)\n profiler = kwargs.get(\"comm_profiler\", None)\n speed_profile = kwargs.pop(\"speed_profile\", False)\n if s is None:\n print(\"no socket\")\n return\n s.sendall(\"new\".encode())\n s.recv(1024)\n with torch.no_grad():\n if speed_profile:\n self.prefill_end = None\n self.prefill_start = time.time()\n self.new_gens = []\n outs = self.generate(*args, **kwargs)\n if speed_profile:\n self.decode_end = time.time()\n s.sendall(\"finish\".encode())\n\n if profiler is not None:\n profiler.get_report()\n\n if speed_profile:\n self.decode_time = self.decode_end - self.prefill_end\n self.prefill_time = self.prefill_end - self.prefill_start\n self.decode_tokens = outs.shape[1] - kwargs.get(\"input_ids\").shape[1]\n self.prefill_tokens = kwargs.get(\"input_ids\").shape[1]\n print(\"Throughput Stats\")\n\n table = PrettyTable()\n\n # Set the column names\n table.field_names = [\"Stages\", \"Tokens\", \"Time\", \"TPS\"]\n\n table.align[\"Stages\"] = \"l\" # \"l\" means left-aligned\n table.align[\"Tokens\"] = \"r\" # \"r\" means right-aligned\n table.align[\"Time\"] = \"r\" # \"r\" means right-aligned\n table.align[\"TPS\"] = \"r\" # \"r\" means right-aligned\n table.add_row(\n [\n \"prefill\",\n self.prefill_tokens,\n round(self.prefill_time, 2),\n round(self.prefill_tokens / self.prefill_time, 2),\n ]\n )\n table.add_row(\n [\n \"decode\",\n self.decode_tokens,\n round(self.decode_time, 2),\n round(self.decode_tokens / self.decode_time, 2),\n ]\n )\n print(table)\n\n return outs\n\n def print_param_count(self):\n m_cnt = 0\n lmh = 0\n emb = 0\n for n, p in self.named_parameters():\n if \"lora\" in n:\n m_cnt += p.numel()\n elif \"lm_head\" in n:\n lmh += p.numel()\n elif \"emb\" in n:\n emb += p.numel()\n total = m_cnt + lmh + emb\n\n table = PrettyTable()\n\n # Set the column names\n table.field_names = [\"Modules\", \"Param #\", \"Param %\"]\n\n # Set the alignment of each column\n table.align[\"Modules\"] = \"l\" # \"l\" means left-aligned\n table.align[\"Param #\"] = \"r\" # \"r\" means right-aligned\n table.align[\"Param %\"] = \"r\" # \"r\" means right-aligned\n table.add_row([\"word emb\", emb, round(emb / total * 100, 2)])\n table.add_row([\"PrivateLoRA M\", m_cnt, 
round(m_cnt / total * 100, 2)])\n table.add_row([\"lm head\", lmh, round(lmh / total * 100, 2)])\n print(\"Param statistics\")\n print(table)" } ]
from demo.model import PLLlamaConfig, LlamaForDevice from pl_lib import CommProfiler from transformers import AutoTokenizer from pl_lib import init_tcp_b import torch import logging import argparse
3,586
parser = argparse.ArgumentParser() parser.add_argument( "weight_path", default=None, help="path to device model weight", ) parser.add_argument( "llama_path", default=None, help="root dir of huggingface llama model, should contain weight files and config", ) parser.add_argument( "--ip", default="127.0.0.1", help="socket ip of cloud", ) parser.add_argument( "--port", default=12345, help="socket port of cloud", ) parser.add_argument( "--device", default="cpu", help="device of model", ) parser.add_argument( "--debug", default=False, ) args = parser.parse_args() log_format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s" logging.basicConfig( level=logging.DEBUG if args.debug else logging.INFO, format=log_format ) if __name__ == "__main__": mock_small = True load_weights = False logging.info("start connecting...") s = init_tcp_b(args.ip, args.port)
parser = argparse.ArgumentParser() parser.add_argument( "weight_path", default=None, help="path to device model weight", ) parser.add_argument( "llama_path", default=None, help="root dir of huggingface llama model, should contain weight files and config", ) parser.add_argument( "--ip", default="127.0.0.1", help="socket ip of cloud", ) parser.add_argument( "--port", default=12345, help="socket port of cloud", ) parser.add_argument( "--device", default="cpu", help="device of model", ) parser.add_argument( "--debug", default=False, ) args = parser.parse_args() log_format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s" logging.basicConfig( level=logging.DEBUG if args.debug else logging.INFO, format=log_format ) if __name__ == "__main__": mock_small = True load_weights = False logging.info("start connecting...") s = init_tcp_b(args.ip, args.port)
config = PLLlamaConfig.from_pretrained(args.llama_path)
0
2023-12-25 06:28:04+00:00
4k
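The gold next line for this record is config = PLLlamaConfig.from_pretrained(args.llama_path). A hedged sketch of how the __main__ block plausibly continues after the socket handshake, using only names shown in the record's context; the exact ordering and the configuration of cloud_ip/cloud_port here are assumptions, not the author's verified code:

config = PLLlamaConfig.from_pretrained(args.llama_path)
config.cloud_ip, config.cloud_port = args.ip, int(args.port)  # assumed: point the device config at the cloud socket

model = LlamaForDevice(config).to(args.device)
tokenizer = AutoTokenizer.from_pretrained(args.llama_path)
model.set_tokenizer(tokenizer)  # enables streamed token printing during forward()
model.print_param_count()       # on-device params only: word embeddings, PrivateLoRA M, lm head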
ohadmata/shmessy
src/shmessy/types_handler.py
[ { "identifier": "Field", "path": "src/shmessy/schema.py", "snippet": "class Field(InferredField, BaseField):\n pass" }, { "identifier": "BaseType", "path": "src/shmessy/types/base.py", "snippet": "class BaseType(ABC):\n weight: int = 0\n validator_types: Tuple[ValidatorTypes]\n\n @abstractmethod\n def validate(self, data: ndarray) -> Optional[InferredField]:\n pass\n\n @abstractmethod\n def fix(self, column: Series, inferred_field: InferredField) -> Series:\n pass\n\n def is_validator_type_valid(self, dtype: Type) -> bool:\n for possible_validator_type in self.validator_types:\n if self._check_single_validator_type(dtype, possible_validator_type):\n return True\n return False\n\n @staticmethod\n def _check_single_validator_type(\n dtype: Type, possible_validator_type: ValidatorTypes\n ) -> bool:\n if possible_validator_type == ValidatorTypes.NUMERIC and not issubdtype(\n dtype, number\n ):\n return False\n\n if possible_validator_type == ValidatorTypes.STRING and not (\n issubdtype(dtype, object_) or issubdtype(dtype, str_)\n ):\n return False\n return True\n\n @property\n def name(self) -> str:\n return str(self.__class__.__name__.replace(\"Type\", \"\"))" }, { "identifier": "BooleanType", "path": "src/shmessy/types/boolean.py", "snippet": "class BooleanType(BaseType):\n weight = 1\n validator_types = (ValidatorTypes.STRING, ValidatorTypes.NUMERIC)\n patterns: list[Tuple] = [ # The first member should be the true value\n (\"YES\", \"NO\"),\n (\"TRUE\", \"FALSE\"),\n (\"T\", \"F\"),\n (\"Y\", \"N\"),\n (1, 0),\n ]\n\n @staticmethod\n def _validate_value_pattern(data: ndarray, pattern: Tuple) -> bool:\n for value in data:\n if isinstance(value, str):\n value = value.lower()\n if isinstance(pattern[0], str):\n pattern = (pattern[0].lower(), pattern[1].lower())\n if value not in (pattern[0], pattern[1]):\n return False\n return True\n\n def validate(self, data: ndarray) -> Optional[InferredField]:\n if not self.is_validator_type_valid(dtype=data.dtype):\n return None\n\n for pattern in self.patterns:\n if self._validate_value_pattern(data, pattern):\n return InferredField(inferred_type=self.name, inferred_pattern=pattern)\n\n def fix(self, column: Series, inferred_field: InferredField) -> Series:\n if isinstance(inferred_field.inferred_pattern[0], str):\n return column.apply(\n lambda x: True\n if x.lower() == inferred_field.inferred_pattern[0].lower()\n else False\n )\n\n return column.apply(\n lambda x: True if x == inferred_field.inferred_pattern[0] else False\n )" }, { "identifier": "DatetimeType", "path": "src/shmessy/types/datetime_.py", "snippet": "class DatetimeType(BaseType):\n weight = 3\n validator_types = (ValidatorTypes.STRING,)\n patterns: list[str] = [\n \"%m/%d/%Y %-H:%M\", # 11/14/2003 0:00\n \"%d-%m-%Y %H:%M\", # 11-14-2003 00:00\n \"%d-%m-%Y %-H:%M\", # 11-14-2003 0:00\n \"%m/%d/%y %H:%M:%S\", # 12/15/22 00:00:00\n \"%m-%d-%y %H:%M:%S\", # 12-30-2022 00:00:00\n \"%m/%d/%Y %H:%M:%S\", # 12/30/2022 00:00:00\n \"%m-%d-%Y %H:%M:%S\", # 12-30-2022 00:00:00\n \"%Y/%m/%d %H:%M:%S\", # 2022/12/30 00:00:00\n \"%Y-%m-%d %H:%M:%S\", # 2022-12-30 00:00:00\n \"%Y-%m-%d %H:%M:%SZ\", # 2022-12-30 00:00:00Z\n \"%Y-%m-%dT%H:%M:%SZ\", # 2022-12-30T00:00:00Z\n \"%Y-%m-%dT%H:%M:%S.%f\", # 2022-12-30T00:00:00.000\n \"%Y-%m-%d %H:%M:%S.%fZ\", # 2022-12-30 00:00:00.000Z\n \"%Y-%m-%d %H:%M:%S.%f\", # 2022-12-30 00:00:00.000\n \"%Y-%m-%dT%H:%M:%S.%fZ\", # 2022-12-30T00:00:00.000Z\n \"%b %-d, %Y %H:%M %p\", # Jul 3, 2023 12:10 PM\n ]\n\n def validate(self, data: ndarray) -> 
Optional[InferredField]:\n if not self.is_validator_type_valid(dtype=data.dtype):\n return None\n\n for pattern in self.patterns:\n if validate_strptime_pattern(data, pattern):\n return InferredField(inferred_type=self.name, inferred_pattern=pattern)\n\n def fix(self, column: Series, inferred_field: InferredField) -> Series:\n return to_datetime(column, format=inferred_field.inferred_pattern)" }, { "identifier": "FloatType", "path": "src/shmessy/types/float.py", "snippet": "class FloatType(BaseType):\n weight = 8\n validator_types = (ValidatorTypes.STRING, ValidatorTypes.NUMERIC)\n\n def validate(self, data: ndarray) -> Optional[InferredField]:\n for column in data:\n try:\n float(column)\n except Exception: # noqa\n return None\n return InferredField(inferred_type=self.name)\n\n def fix(self, column: Series, inferred_field: InferredField) -> Series:\n raise NotImplementedError()" }, { "identifier": "IntegerType", "path": "src/shmessy/types/integer.py", "snippet": "class IntegerType(BaseType):\n weight = 7\n validator_types = (ValidatorTypes.STRING, ValidatorTypes.NUMERIC)\n\n def validate(self, data: ndarray) -> Optional[InferredField]:\n for column in data:\n try:\n int(column)\n except Exception: # noqa\n return None\n return InferredField(inferred_type=self.name)\n\n def fix(self, column: Series, inferred_field: InferredField) -> Series:\n raise NotImplementedError()" }, { "identifier": "StringType", "path": "src/shmessy/types/string.py", "snippet": "class StringType(BaseType):\n weight = 9\n validator_types = (ValidatorTypes.STRING,)\n\n def validate(self, data: ndarray) -> Optional[InferredField]:\n for column in data:\n try:\n str(column)\n except Exception: # noqa\n return None\n return InferredField(inferred_type=self.name)\n\n def fix(self, column: Series, inferred_field: InferredField) -> Series:\n raise NotImplementedError()" } ]
import logging import os from importlib import import_module from types import ModuleType from typing import Any, Dict, List, Optional, Type from numpy import ndarray from numpy.dtypes import ( BoolDType, DateTime64DType, Float16DType, Float32DType, Float64DType, Int8DType, Int16DType, Int32DType, Int64DType, IntDType, ObjectDType, StrDType, ) from pandas import Series from .schema import Field from .types.base import BaseType from .types.boolean import BooleanType from .types.datetime_ import DatetimeType from .types.float import FloatType from .types.integer import IntegerType from .types.string import StringType
1,977
logger = logging.getLogger(__name__) class TypesHandler: PACKAGE_NAME: str = "shmessy" TYPES_DIR: str = "types" def __init__(self): self.__types = self._discover_types()
logger = logging.getLogger(__name__) class TypesHandler: PACKAGE_NAME: str = "shmessy" TYPES_DIR: str = "types" def __init__(self): self.__types = self._discover_types()
self.__types_as_dict: Dict[str, BaseType] = self._types_as_dict(self.__types)
1
2023-12-27 20:15:01+00:00
4k
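For the type system in this record, the BooleanType snippet illustrates the contract shared by all BaseType subclasses: validate() infers a (true, false) pattern from raw values and fix() casts a column with it. A small sketch, under the assumption that the installed package exposes the module as shmessy.types.boolean (mirroring src/shmessy/types/boolean.py):

import numpy as np
from pandas import Series

from shmessy.types.boolean import BooleanType  # import path assumed from the repo layout

data = np.array(["YES", "no", "Yes"])
inferred = BooleanType().validate(data)            # matches the ("YES", "NO") pattern, case-insensitively
fixed = BooleanType().fix(Series(data), inferred)
print(fixed.tolist())                              # [True, False, True]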
kokiez/solana-sniper
main.py
[ { "identifier": "getSymbol", "path": "birdeye.py", "snippet": "def getSymbol(token):\r\n # usdc and usdt\r\n exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']\r\n \r\n if token not in exclude:\r\n url = f\"https://api.dexscreener.com/latest/dex/tokens/{token}\"\r\n\r\n Token_Symbol = \"\"\r\n Sol_symbol=\"\"\r\n try:\r\n response = requests.get(url)\r\n\r\n # Check if the request was successful (status code 200)\r\n if response.status_code == 200:\r\n resp = response.json()\r\n print(\"Response:\",resp['pairs'][0]['baseToken']['symbol'])\r\n for pair in resp['pairs']:\r\n quoteToken = pair['quoteToken']['symbol']\r\n\r\n if quoteToken == 'SOL':\r\n Token_Symbol = pair['baseToken']['symbol']\r\n Sol_symbol = quoteToken\r\n return Token_Symbol, Sol_symbol\r\n\r\n\r\n else:\r\n print(f\"[getSymbol] Request failed with status code {response.status_code}\")\r\n\r\n except requests.exceptions.RequestException as e:\r\n print(f\"[getSymbol] error occurred: {e}\")\r\n except: \r\n a = 1\r\n\r\n return Token_Symbol, Sol_symbol\r\n else:\r\n if token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v':\r\n return \"USDC\", \"SOL\"\r\n elif token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v':\r\n return \"USDT\", \"SOL\"\r" }, { "identifier": "select_amm2trade", "path": "amm_selection.py", "snippet": "def select_amm2trade(token_address,payer, ctx, event_thread):\r\n config = ConfigParser()\r\n # using sys and os because sometimes this shitty config reader does not read from curr directory\r\n config.read(os.path.join(sys.path[0], 'data', 'config.ini'))\r\n \"\"\"\r\n Import the variables from config.ini\r\n \"\"\"\r\n invest_ratio = float(config.get(\"INVESTMENT\", \"invest_ratio\"))\r\n invest_amount_sol = float(config.get(\"INVESTMENT\", \"invest_amount_in_sol\"))\r\n\r\n\r\n limit_order_sell_Bool = config.getboolean(\"INVESTMENT\", \"limit_order_sell\")\r\n take_profit_ratio = float(config.get(\"INVESTMENT\", \"take_profit_ratio\"))\r\n\r\n trailing_stop_Bool = config.getboolean(\"INVESTMENT\", \"trailing_stop\")\r\n trailing_stop_ratio = float(config.get(\"INVESTMENT\", \"trailing_stop_ratio\"))\r\n\r\n Limit_and_Trailing_Stop_Bool = config.getboolean(\"INVESTMENT\", \"Limit_and_Trailing_Stop\")\r\n\r\n desired_token_address= token_address\r\n \r\n \"\"\"\r\n get your investment ratio/amount...\r\n get balance of your wallet\r\n check if you have enough balance to invest or not. \r\n \r\n balance - investment = remaining amount\r\n if remaining amount if less than minimum balance for fees then it will not trade\r\n e.g. 
0.01 Sol required for fees in my case...\r\n \"\"\"\r\n if invest_ratio == 0: \r\n amount_of_sol_to_swap = int(invest_amount_sol * LAMPORTS_PER_SOL)\r\n else:\r\n currBalance = ctx.get_balance(payer.pubkey()).value\r\n balanceAfterRatio = currBalance * (invest_ratio / 100)\r\n amount_of_sol_to_swap = int(currBalance - balanceAfterRatio)\r\n temp = amount_of_sol_to_swap / LAMPORTS_PER_SOL\r\n \r\n accountBalance = ctx.get_balance(payer.pubkey())\r\n\r\n token_symbol, SOl_Symbol = getSymbol(desired_token_address)\r\n sendWebhook(f\"w|WALLET INFO {token_symbol}\",f\"SOL: {accountBalance.value / LAMPORTS_PER_SOL}\")\r\n\r\n miniumBalanceRequired = 10000000 #for txn fees etc\r\n \r\n remainingAfterInvest = accountBalance.value - amount_of_sol_to_swap\r\n\r\n\r\n if check_token_existence(desired_token_address) == False:\r\n\r\n\r\n if remainingAfterInvest < miniumBalanceRequired:\r\n print(\"[Wallet Info] Insufficient Balance: {:.10f}\".format(remainingAfterInvest))\r\n sendWebhook(f\"a|Wallet Info {token_symbol}\",\"Insufficient Balance: {:.10f}\".format(remainingAfterInvest))\r\n event_thread.set()\r\n\r\n else: \r\n \"\"\"\r\n Get the current price of the token\r\n start trading it...\r\n \"\"\"\r\n # API calls are limited to 300 requests per minute\r\n desired_token_usd_price = get_price(desired_token_address)\r\n print(f\"Token Address: {desired_token_address}\\nToken Price USD: {desired_token_usd_price:.15f}\")\r\n sendWebhook(f\"a|Token Market Info {token_symbol}\",f\"Token Address: {desired_token_address}\\nToken Price USD: {desired_token_usd_price:.15f}\")\r\n\r\n \"\"\"\r\n ..............................................\r\n Jupiter Swap Starts here\r\n ..............................................\r\n \"\"\"\r\n # Call Buy Method - returns transaction hash (txB= tx for buy)\r\n start_time = time.time()\r\n txB = buy(payer, ctx, amount_of_sol_to_swap, token_address, config)\r\n end_time = time.time()\r\n execution_time = end_time - start_time\r\n print(f\"Total Execution time: {execution_time} seconds\")\r\n \r\n # Check if the transaction wasn't successful\r\n if str(txB) != 'failed':\r\n\r\n \"\"\"\r\n You can delete the token from the file but you cannot stop the previous thread if you executed another buy...\r\n \"\"\"\r\n write_token_to_file(desired_token_address)\r\n\r\n bought_token_price = get_price(desired_token_address)\r\n # Save the settings into a file\r\n storeSettings(\"Jupiter\",\r\n desired_token_address,\r\n txB,\r\n execution_time,\r\n limit_order_sell_Bool,\r\n take_profit_ratio,\r\n trailing_stop_Bool,\r\n trailing_stop_ratio,\r\n Limit_and_Trailing_Stop_Bool,\r\n bought_token_price)\r\n\r\n\r\n event_thread.set() #continue other threads\r\n jupiter_swap(config, ctx, payer, desired_token_address, txB, execution_time, limit_order_sell_Bool, take_profit_ratio, trailing_stop_Bool, trailing_stop_ratio, Limit_and_Trailing_Stop_Bool, bought_token_price)\r\n\r\n \r\n else:\r\n print(\"[Jupiter] Buy Failed\")\r\n sendWebhook(f\"e|Jupiter {token_symbol}\",f\"Jupiter Officially failed....\\nNow trying Raydium\")\r\n \"\"\"\r\n if Jupiter fails, then try Raydium\r\n (if you're interested why, read more about it in the docs: liquidity, and why some coins are not added to Jupiter)\r\n\r\n Raydium Swap Starts here\r\n\r\n Just use Raydium, it's much faster and better than Jupiter because of liquidity\r\n But the issue is that if the pair is new then the pools are downloaded again... 
which slows the transaction by 10 seconds\r\n                \"\"\"\r\n                print(\"---------------[Raydium] Buy Started---------------\")\r\n                start_time = time.time()\r\n                # [Raydium] - \r\n                sendWebhook(f\"a|[Raydium] - {token_symbol}\",f\"Raydium Officially started....\")\r\n\r\n                # in sol (not in lamports)\r\n                txB_R = raydium_buy(ctx, desired_token_address, payer, invest_amount_sol)\r\n                \r\n                end_time = time.time()\r\n                execution_time = end_time - start_time\r\n                print(f\"Total Execution time: {execution_time} seconds\")\r\n                event_thread.set()\r\n                if txB_R != \"failed\":\r\n                    \"\"\"\r\n                    You can delete the token from the file but you cannot stop the previous thread if you executed another buy...\r\n                    \"\"\"\r\n                    write_token_to_file(desired_token_address)\r\n\r\n                    bought_token_price = get_price(desired_token_address)\r\n                    # Save the settings into a file\r\n                    storeSettings(\"Raydium\",\r\n                        desired_token_address,\r\n                        txB_R,\r\n                        execution_time,\r\n                        limit_order_sell_Bool,\r\n                        take_profit_ratio,\r\n                        trailing_stop_Bool,\r\n                        trailing_stop_ratio,\r\n                        Limit_and_Trailing_Stop_Bool,\r\n                        bought_token_price)\r\n                    \r\n                    event_thread.set() #continue other threads\r\n                    \r\n                    raydium_swap_monitor_sell(config, ctx, payer, desired_token_address, txB_R, execution_time, limit_order_sell_Bool, take_profit_ratio, trailing_stop_Bool, trailing_stop_ratio, Limit_and_Trailing_Stop_Bool, bought_token_price)\r\n                    \r\n                else:\r\n                    print(\"[Raydium] Buy Failed\")\r\n                    sendWebhook(f\"e|Raydium {token_symbol}\",f\"Raydium Officially failed....\")\r\n\r\n                    event_thread.set()\r\n\r\n    else:\r\n        \"\"\"\r\n        Token already exists in files...\r\n        \"\"\"\r\n        \r\n        Config_settings = getSettings(desired_token_address)\r\n        event_thread.set() #continue other threads\r\n\r\n        try:\r\n            amm_name = Config_settings['amm']\r\n            txB = Config_settings['txB']\r\n            execution_time = Config_settings['execution_time']\r\n            limit_order_sell_Bool = Config_settings['limit_order_sell_Bool']\r\n            take_profit_ratio = Config_settings['take_profit_ratio']\r\n            trailing_stop_Bool = Config_settings['trailing_stop_Bool']\r\n            trailing_stop_ratio = Config_settings['trailing_stop_ratio']\r\n            Limit_and_Trailing_Stop_Bool = Config_settings['Limit_and_Trailing_Stop_Bool']\r\n            bought_token_price = Config_settings['bought_token_price']\r\n\r\n            \r\n            if checkB(desired_token_address, payer, ctx) == True:\r\n\r\n                sendWebhook(f\"a|Wallet Info {token_symbol}\",f\"Token already exists in files and ***wallet***\\nNow Re-Selling it...\\ntrying Jupiter...\")\r\n\r\n                if amm_name == 'Jupiter':\r\n                    jupiter_swap(config, ctx, payer, desired_token_address, txB, execution_time, limit_order_sell_Bool, take_profit_ratio, trailing_stop_Bool, trailing_stop_ratio, Limit_and_Trailing_Stop_Bool, bought_token_price)\r\n                \r\n                elif amm_name == 'Raydium':\r\n                    # if amm_name == 'Raydium':\r\n\r\n                    raydium_swap_monitor_sell(config, ctx, payer, desired_token_address, txB, execution_time, limit_order_sell_Bool, take_profit_ratio, trailing_stop_Bool, trailing_stop_ratio, Limit_and_Trailing_Stop_Bool, bought_token_price)\r\n\r\n            else:\r\n                sendWebhook(f\"a|Wallet Info {token_symbol}\",f\"Token not found in wallet...\\n\")\r\n\r\n\r\n        except Exception as e:\r\n            print(f\"Config file missing settings, ERROR: {e}\")\r\n            sendWebhook(f\"a|Config file {token_symbol}\",f\"Config file missing settings!\\n{e}\")\r\n\r\n    print(\"--------------------------END--------------------------------\")\r" }, { "identifier": "sendWebhook", "path": "webhook.py", "snippet": "def sendWebhook(title_type_info, description):\r\n    global error_webhook\r\n    global webhook_url\r\n    
title = \"\"\r\n title_type = title_type_info.split(\"|\")\r\n if title_type[0] == \"msg\":\r\n title = title_type[1]\r\n color = colors[\"Green\"]\r\n webhook(title, color, description, webhook_url)\r\n \r\n elif title_type[0] == \"msg_b\":\r\n title = title_type[1]\r\n color = colors[\"DarkAqua\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"msg_s\":\r\n title = title_type[1]\r\n color = colors[\"DarkAqua\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"i_s\": #invest or slippage was changed etc\r\n title = title_type[1]\r\n color = colors[\"DarkPurple\"]\r\n webhook(title, color, description, webhook_url)\r\n \r\n elif title_type[0] == \"e\": #error\r\n title = title_type[1]\r\n color = colors[\"DarkRed\"]\r\n webhook(title, color, description, error_webhook)\r\n\r\n elif title_type[0] == \"a\": #alert\r\n title = title_type[1]\r\n color = colors[\"LuminousVividPink\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"w\": #wallet info\r\n title = title_type[1]\r\n color = colors[\"Gold\"]\r\n webhook(title, color, description, webhook_url)\r" }, { "identifier": "load_keypair_from_file", "path": "loadkey.py", "snippet": "def load_keypair_from_file(filename):\r\n curr = os.path.join(sys.path[0], 'data', filename)\r\n with open(curr, 'r') as file:\r\n secret = json.load(file)\r\n secret_key = bytes(secret)\r\n # print(base58.b58encode(secret_key))\r\n return Keypair.from_bytes(secret_key)\r" } ]
import base58, logging,time, re, os,sys, json import threading from datetime import datetime from solders.keypair import Keypair from solana.rpc.api import Client from solana.rpc.commitment import Commitment from configparser import ConfigParser from threading import Thread, Event from birdeye import getSymbol from telethon import TelegramClient, events, errors from amm_selection import select_amm2trade from webhook import sendWebhook from loadkey import load_keypair_from_file
3,558
# Packages for Telegram # Other Methods created # ------------------------ ------------------------ ------------------------ # INITIALIZING VARIABLES # ------------------------ ------------------------ ------------------------ # to read content from config.ini config = ConfigParser() # using sys and os because sometimes this shitty config reader does not read from curr directory config.read(os.path.join(sys.path[0], 'data', 'config.ini')) # Configuring the logging log_file = os.path.join('data', f"logs.txt") logging.basicConfig(level=logging.WARNING, filename=log_file, format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',datefmt='%d-%b-%y %I:%M:%S %p') def custom_exception_handler(exc_type, exc_value, exc_traceback): # Log the exception automatically logging.exception("An unhandled exception occurred: %s", str(exc_value)) sys.excepthook = custom_exception_handler # Telegram settings senderUserNames_to_monitor = config.get("TELEGRAM", "senderUserNames") senderUserNames = senderUserNames_to_monitor.split(',') session_name = config.get("TELEGRAM", "session_name") api_id = config.getint("TELEGRAM", "API_ID") api_hash = config.get("TELEGRAM", "API_HASH") discord_msg_pattern = r'https?://birdeye\.so/token/(\w+)\?chain=solana' CA_pattern = r'[1-9A-HJ-NP-Za-km-z]{32,44}' # Infura settings - register at infura and get your mainnet url. RPC_HTTPS_URL = config.get("INFURA_URL", "infuraURL") # Wallet's private key private_key = config.get("WALLET", "private_key") # Check if private key is in the form of ./something.json if re.match(r'\w+\.json', private_key): # Private key is in the form of ./something.json
# Packages for Telegram # Other Methods created # ------------------------ ------------------------ ------------------------ # INITIALIZING VARIABLES # ------------------------ ------------------------ ------------------------ # to read content from config.ini config = ConfigParser() # using sys and os because sometimes this shitty config reader does not read from curr directory config.read(os.path.join(sys.path[0], 'data', 'config.ini')) # Configuring the logging log_file = os.path.join('data', f"logs.txt") logging.basicConfig(level=logging.WARNING, filename=log_file, format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',datefmt='%d-%b-%y %I:%M:%S %p') def custom_exception_handler(exc_type, exc_value, exc_traceback): # Log the exception automatically logging.exception("An unhandled exception occurred: %s", str(exc_value)) sys.excepthook = custom_exception_handler # Telegram settings senderUserNames_to_monitor = config.get("TELEGRAM", "senderUserNames") senderUserNames = senderUserNames_to_monitor.split(',') session_name = config.get("TELEGRAM", "session_name") api_id = config.getint("TELEGRAM", "API_ID") api_hash = config.get("TELEGRAM", "API_HASH") discord_msg_pattern = r'https?://birdeye\.so/token/(\w+)\?chain=solana' CA_pattern = r'[1-9A-HJ-NP-Za-km-z]{32,44}' # Infura settings - register at infura and get your mainnet url. RPC_HTTPS_URL = config.get("INFURA_URL", "infuraURL") # Wallet's private key private_key = config.get("WALLET", "private_key") # Check if private key is in the form of ./something.json if re.match(r'\w+\.json', private_key): # Private key is in the form of ./something.json
payer = load_keypair_from_file(private_key)
3
2023-12-26 11:40:05+00:00
4k
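The record above monitors Telegram messages for Solana token addresses using two regexes: a birdeye.so link pattern with a capture group, and a bare base58 contract-address pattern. A minimal sketch of that extraction step, assuming a hypothetical helper name and message text (neither is part of the record):

import re

discord_msg_pattern = r'https?://birdeye\.so/token/(\w+)\?chain=solana'
CA_pattern = r'[1-9A-HJ-NP-Za-km-z]{32,44}'  # base58 alphabet: no 0, O, I or l

def extract_token_address(text):
    # Prefer an explicit birdeye link; fall back to a bare contract address.
    link = re.search(discord_msg_pattern, text)
    if link:
        return link.group(1)
    ca = re.search(CA_pattern, text)
    return ca.group(0) if ca else None

# Hypothetical message text, for illustration only.
msg = "new gem: https://birdeye.so/token/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v?chain=solana"
print(extract_token_address(msg))  # EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v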
CrawlScript/Torch-MGDCF
main_light_gcn.py
[ { "identifier": "compute_bpr_loss", "path": "torch_mgdcf/losses.py", "snippet": "def compute_bpr_loss(a_embeddings, b_embeddings, pos_edges, reduction='mean'):\n \"\"\"\n bpr is a special case of info_bpr, where num_negs=1\n \"\"\"\n return compute_info_bpr_loss(a_embeddings, b_embeddings, pos_edges, num_negs=1, reduction=reduction)" }, { "identifier": "compute_l2_loss", "path": "torch_mgdcf/losses.py", "snippet": "def compute_l2_loss(params):\n \"\"\"\n Compute l2 loss for a list of parameters/tensors\n \"\"\"\n l2_loss = 0.0\n for param in params:\n l2_loss += param.pow(2).sum() * 0.5\n return l2_loss" }, { "identifier": "create_tensor_dataloader", "path": "torch_mgdcf/utils.py", "snippet": "def create_tensor_dataloader(tensor, batch_size=None, shuffle=False):\n dataset = TensorDataset(tensor)\n if shuffle:\n sampler = RandomSampler(dataset)\n else:\n sampler = SequentialSampler(dataset)\n return DataLoader(dataset, \n sampler=BatchSampler(sampler, batch_size=batch_size, drop_last=False),\n collate_fn=lambda batchs: batchs[0][0]\n )" }, { "identifier": "load_dataset", "path": "torch_mgdcf/datasets.py", "snippet": "def load_dataset(dataset_name, data_root_path=\"./datasets\", cache_name=\"cache.p\"):\n \"\"\"\n Load the DGL dataset.\n :param dataset_name: \"yelp\" | \"gowalla\" | \"amazon-book\"\n :param dataset_root_path:\n :return:\n \"\"\"\n\n dataset_root_path = os.path.join(data_root_path, dataset_name)\n processed_root_path = os.path.join(dataset_root_path, \"processed\")\n cache_path = None if cache_name is None else os.path.join(processed_root_path, cache_name)\n raw_root_path = os.path.join(dataset_root_path, \"raw\")\n download_root_path = os.path.join(dataset_root_path, \"download\")\n download_file_name=\"{}.zip\".format(dataset_name)\n download_file_path = os.path.join(download_root_path, download_file_name)\n download_url = \"https://github.com/maenzhier/grecx_datasets/raw/main/{}/{}.zip\".format(dataset_name.replace(\"light_gcn_\", \"\"), dataset_name)\n dataset_unzip_path = os.path.join(raw_root_path, dataset_name)\n\n for path in [dataset_root_path, processed_root_path, raw_root_path, download_root_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\n\n if cache_path is not None and os.path.exists(cache_path):\n print(\"cache file exists: {}, read cache\".format(cache_path))\n with open(cache_path, \"rb\") as f:\n dataset = pickle.load(f)\n return dataset\n\n\n\n if not os.path.exists(download_file_path):\n download_file(download_url, download_file_path)\n \n\n if len(os.listdir(raw_root_path)) == 0:\n extract_zip(download_file_path, raw_root_path)\n\n processed = _process(dataset_unzip_path)\n\n if cache_path is not None:\n print(\"save processed data to cache: \", cache_path)\n with open(cache_path, \"wb\") as f:\n pickle.dump(processed, f)\n\n\n return processed" }, { "identifier": "LightGCN", "path": "torch_mgdcf/layers/light_gcn.py", "snippet": "class LightGCN(nn.Module):\n\n CACHE_KEY = \"light_gcn_weight\"\n\n def __init__(self, k, edge_drop_rate, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.k = k\n self.edge_drop_rate = edge_drop_rate\n\n self.edge_dropout = nn.Dropout(edge_drop_rate)\n\n @classmethod\n def build_homo_graph(cls, user_item_edges, num_users=None, num_items=None):\n\n user_index, item_index = user_item_edges.T\n\n if num_users is None:\n num_users = np.max(user_index) + 1\n\n if num_items is None:\n num_items = np.max(item_index) + 1\n\n num_homo_nodes = num_users + num_items\n homo_item_index = item_index + num_users\n 
src = user_index\n dst = homo_item_index\n\n g = dgl.graph((src, dst), num_nodes=num_homo_nodes)\n g = dgl.add_reverse_edges(g)\n # LightGCN does not consider self-loop\n # g = dgl.add_self_loop(g)\n g = dgl.to_simple(g)\n \n return g\n\n @classmethod\n @torch.no_grad()\n def norm_adj(cls, g):\n\n CACHE_KEY = LightGCN.CACHE_KEY\n\n if CACHE_KEY in g.edata:\n return\n \n degs = g.in_degrees()\n src_norm = degs.pow(-0.5)\n dst_norm = src_norm\n\n with g.local_scope():\n g.ndata[\"src_norm\"] = src_norm\n g.ndata[\"dst_norm\"] = dst_norm\n g.apply_edges(fn.u_mul_v(\"src_norm\", \"dst_norm\", CACHE_KEY))\n gcn_weight = g.edata[CACHE_KEY]\n\n g.edata[CACHE_KEY] = gcn_weight\n\n\n def forward(self, g, x):\n\n CACHE_KEY = LightGCN.CACHE_KEY\n LightGCN.norm_adj(g)\n\n edge_weight = g.edata[CACHE_KEY]\n dropped_edge_weight = self.edge_dropout(edge_weight)\n\n\n with g.local_scope():\n g.edata[CACHE_KEY] = dropped_edge_weight\n g.ndata[\"h\"] = x\n\n h = x\n h_list = [h]\n\n for _ in range(self.k):\n g.update_all(fn.u_mul_e(\"h\", CACHE_KEY, \"m\"), fn.sum(\"m\", \"h\"))\n h = g.ndata[\"h\"]\n h_list.append(h)\n\n h = torch.stack(h_list, dim=1).mean(dim=1)\n\n return h" }, { "identifier": "evaluate_mean_global_metrics", "path": "torch_mgdcf/evaluation/ranking.py", "snippet": "def evaluate_mean_global_metrics(user_items_dict, user_mask_items_dict,\n user_embedding, item_embedding,\n k_list=[10, 20], metrics=[\"ndcg\"]):\n\n v_search = VectorSearchEngine(item_embedding)\n\n if isinstance(user_embedding, torch.Tensor):\n user_embedding = user_embedding.detach().cpu().numpy()\n else:\n user_embedding = np.asarray(user_embedding)\n\n user_indices = list(user_items_dict.keys())\n embedded_users = user_embedding[user_indices]\n max_mask_items_length = max(len(user_mask_items_dict[user]) for user in user_indices)\n\n _, user_rank_pred_items = v_search.search(embedded_users, k_list[-1] + max_mask_items_length)\n\n res_scores = []\n for user, pred_items in tqdm(zip(user_indices, user_rank_pred_items)):\n\n items = user_items_dict[user]\n mask_items = user_mask_items_dict[user]\n pred_items = [item for item in pred_items if item not in mask_items][:k_list[-1]]\n\n res_score = score(items, pred_items, k_list, metrics)\n\n res_scores.append(res_score)\n\n res_scores = np.asarray(res_scores)\n names = []\n for metric in metrics:\n for k in k_list:\n names.append(\"{}@{}\".format(metric, k))\n\n # return list(zip(names, np.mean(res_scores, axis=0, keepdims=False)))\n return dict(zip(names, np.mean(res_scores, axis=0, keepdims=False)))" } ]
import os import argparse import torch import torch.nn.functional as F import numpy as np import time from torch_mgdcf.losses import compute_bpr_loss, compute_l2_loss from torch_mgdcf.utils import create_tensor_dataloader from torch_mgdcf.datasets import load_dataset from torch_mgdcf.layers.light_gcn import LightGCN from tqdm import tqdm from torch_mgdcf.evaluation.ranking import evaluate_mean_global_metrics
2,425
# set gpu id os.environ["CUDA_VISIBLE_DEVICES"] = "1" np.set_printoptions(precision=4) parser = argparse.ArgumentParser(description='Argument parser for the program.') parser.add_argument('--dataset', type=str, default='light_gcn_yelp', help='Dataset Name') parser.add_argument('--embedding_size', type=int, default=64, help='Embedding size') parser.add_argument('--lr', type=float, default=1e-2, help='Learning rate') parser.add_argument('--l2_coef', type=float, default=1e-4, help='L2 regularization coefficient') parser.add_argument('--lr_decay', type=float, default=0.995, help='Learning rate decay') parser.add_argument('--k', type=int, default=4, help='Number of layers') parser.add_argument('--edge_drop_rate', type=float, default=0.15, help='Edge drop rate') parser.add_argument('--batch_size', type=int, default=8000, help='Batch size') parser.add_argument('--num_epochs', type=int, default=3000, help='Number of epochs') args = parser.parse_args() print(args) dataset_name = args.dataset embedding_size = args.embedding_size lr = args.lr l2_coef = args.l2_coef lr_decay = args.lr_decay k = args.k edge_drop_rate = args.edge_drop_rate batch_size = args.batch_size num_epochs = args.num_epochs device = "cuda" num_users, num_items, user_item_edges, train_index, test_index, train_user_items_dict, test_user_items_dict = load_dataset(dataset_name) train_user_item_edges = user_item_edges[train_index] g = LightGCN.build_homo_graph(train_user_item_edges, num_users=num_users, num_items=num_items).to(device) num_nodes = g.num_nodes() embeddings = np.random.randn(num_nodes, embedding_size) / np.sqrt(embedding_size) embeddings = torch.tensor(embeddings, dtype=torch.float32, requires_grad=True, device=device) model = LightGCN(k=k, edge_drop_rate=edge_drop_rate).to(device) def forward(): virtual_h = model(g, embeddings) user_h = virtual_h[:num_users] item_h = virtual_h[num_users:] return user_h, item_h def evaluate(): model.eval() user_h, item_h = forward() user_h = user_h.detach().cpu().numpy() item_h = item_h.detach().cpu().numpy()
# set gpu id os.environ["CUDA_VISIBLE_DEVICES"] = "1" np.set_printoptions(precision=4) parser = argparse.ArgumentParser(description='Argument parser for the program.') parser.add_argument('--dataset', type=str, default='light_gcn_yelp', help='Dataset Name') parser.add_argument('--embedding_size', type=int, default=64, help='Embedding size') parser.add_argument('--lr', type=float, default=1e-2, help='Learning rate') parser.add_argument('--l2_coef', type=float, default=1e-4, help='L2 regularization coefficient') parser.add_argument('--lr_decay', type=float, default=0.995, help='Learning rate decay') parser.add_argument('--k', type=int, default=4, help='Number of layers') parser.add_argument('--edge_drop_rate', type=float, default=0.15, help='Edge drop rate') parser.add_argument('--batch_size', type=int, default=8000, help='Batch size') parser.add_argument('--num_epochs', type=int, default=3000, help='Number of epochs') args = parser.parse_args() print(args) dataset_name = args.dataset embedding_size = args.embedding_size lr = args.lr l2_coef = args.l2_coef lr_decay = args.lr_decay k = args.k edge_drop_rate = args.edge_drop_rate batch_size = args.batch_size num_epochs = args.num_epochs device = "cuda" num_users, num_items, user_item_edges, train_index, test_index, train_user_items_dict, test_user_items_dict = load_dataset(dataset_name) train_user_item_edges = user_item_edges[train_index] g = LightGCN.build_homo_graph(train_user_item_edges, num_users=num_users, num_items=num_items).to(device) num_nodes = g.num_nodes() embeddings = np.random.randn(num_nodes, embedding_size) / np.sqrt(embedding_size) embeddings = torch.tensor(embeddings, dtype=torch.float32, requires_grad=True, device=device) model = LightGCN(k=k, edge_drop_rate=edge_drop_rate).to(device) def forward(): virtual_h = model(g, embeddings) user_h = virtual_h[:num_users] item_h = virtual_h[num_users:] return user_h, item_h def evaluate(): model.eval() user_h, item_h = forward() user_h = user_h.detach().cpu().numpy() item_h = item_h.detach().cpu().numpy()
mean_results_dict = evaluate_mean_global_metrics(test_user_items_dict, train_user_items_dict,
5
2023-12-26 10:26:50+00:00
4k
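The LightGCN layer in this record normalizes each edge symmetrically (degree to the power -0.5 on both endpoints) and averages the embeddings produced by all k propagation hops, including hop 0. A dense-matrix sketch of that propagation with toy sizes (the node count, edges and dimensions are assumptions; the record itself does this with DGL message passing over a cached edge weight):

import torch

num_nodes, dim, k = 6, 8, 4
A = torch.zeros(num_nodes, num_nodes)
for u, i in [(0, 3), (0, 4), (1, 4), (2, 5)]:  # toy undirected user-item edges
    A[u, i] = A[i, u] = 1.0

deg = A.sum(dim=1).clamp(min=1.0)               # guard isolated nodes
norm = deg.pow(-0.5)
A_hat = norm[:, None] * A * norm[None, :]       # D^-1/2 A D^-1/2

h = torch.randn(num_nodes, dim) / dim ** 0.5    # initial embeddings
h_list = [h]
for _ in range(k):
    h = A_hat @ h                               # one propagation step
    h_list.append(h)

out = torch.stack(h_list, dim=1).mean(dim=1)    # average hops 0..k
print(out.shape)                                # torch.Size([6, 8])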
kraina-ai/quackosm
quackosm/pbf_file_reader.py
[ { "identifier": "FEATURES_INDEX", "path": "quackosm/_constants.py", "snippet": "FEATURES_INDEX = \"feature_id\"" }, { "identifier": "GEOMETRY_COLUMN", "path": "quackosm/_constants.py", "snippet": "GEOMETRY_COLUMN = \"geometry\"" }, { "identifier": "WGS84_CRS", "path": "quackosm/_constants.py", "snippet": "WGS84_CRS = \"EPSG:4326\"" }, { "identifier": "GroupedOsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: GroupedOsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[OsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[GroupedOsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(\n osm_tags_filter: Union[\n OsmTagsFilter, GroupedOsmTagsFilter, Iterable[OsmTagsFilter], Iterable[GroupedOsmTagsFilter]\n ]\n) -> OsmTagsFilter:\ndef _merge_grouped_osm_tags_filter(grouped_filter: GroupedOsmTagsFilter) -> OsmTagsFilter:\ndef _merge_multiple_osm_tags_filters(osm_tags_filters: Iterable[OsmTagsFilter]) -> OsmTagsFilter:" }, { "identifier": "OsmWayPolygonConfig", "path": "quackosm/_osm_way_polygon_features.py", "snippet": "class OsmWayPolygonConfig(NamedTuple):\n \"\"\"OSM Way polygon features config object.\"\"\"\n\n all: Iterable[str]\n allowlist: dict[str, Iterable[str]]\n denylist: dict[str, Iterable[str]]" }, { "identifier": "parse_dict_to_config_object", "path": "quackosm/_osm_way_polygon_features.py", "snippet": "def parse_dict_to_config_object(raw_config: dict[str, Any]) -> OsmWayPolygonConfig:\n all_tags = raw_config.get(\"all\", [])\n allowlist_tags = raw_config.get(\"allowlist\", {})\n denylist_tags = raw_config.get(\"denylist\", {})\n if not is_expected_type(all_tags, Iterable[str]):\n raise ValueError(f\"Wrong type of key: all ({type(all_tags)})\")\n\n if not is_expected_type(allowlist_tags, dict[str, Iterable[str]]):\n raise ValueError(f\"Wrong type of key: all ({type(allowlist_tags)})\")\n\n if not is_expected_type(denylist_tags, dict[str, Iterable[str]]):\n raise ValueError(f\"Wrong type of key: denylist ({type(denylist_tags)})\")\n\n return OsmWayPolygonConfig(\n all=cast(Iterable[str], all_tags),\n allowlist=cast(dict[str, Iterable[str]], allowlist_tags),\n denylist=cast(dict[str, Iterable[str]], denylist_tags),\n )" }, { "identifier": "TaskProgressBar", "path": "quackosm/_rich_progress.py", "snippet": "class TaskProgressBar:\n def __init__(self, step_name: str, step_number: str):\n self.step_name = step_name\n self.step_number = step_number\n self.progress = None\n\n def __enter__(self):\n try: # pragma: no cover\n from rich.progress import (\n BarColumn,\n MofNCompleteColumn,\n Progress,\n ProgressColumn,\n SpinnerColumn,\n Task,\n Text,\n TextColumn,\n TimeElapsedColumn,\n TimeRemainingColumn,\n )\n\n class SpeedColumn(ProgressColumn):\n def render(self, task: \"Task\") -> Text:\n if task.speed is None:\n return Text(\"\")\n elif task.speed >= 1:\n return Text(f\"{task.speed:.2f} it/s\")\n else:\n return Text(f\"{1/task.speed:.2f} s/it\") # noqa: FURB126\n\n self.progress = Progress(\n SpinnerColumn(),\n TextColumn(f\"[{self.step_number: >4}/{TOTAL_STEPS}]\"),\n TextColumn(\n \"[progress.description]{task.description}\"\n \" [progress.percentage]{task.percentage:>3.0f}%\"\n ),\n BarColumn(),\n MofNCompleteColumn(),\n TextColumn(\"•\"),\n TimeElapsedColumn(),\n TextColumn(\"<\"),\n TimeRemainingColumn(),\n TextColumn(\"•\"),\n SpeedColumn(),\n 
transient=False,\n speed_estimate_period=1800,\n )\n\n self.progress.__enter__()\n\n except ImportError:\n self.progress = None\n\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n if self.progress:\n self.progress.__exit__(exc_type, exc_value, exc_tb)\n\n self.progress = None\n\n def track(self, iterable: Iterable):\n if self.progress is not None:\n for i in self.progress.track(list(iterable), description=self.step_name):\n yield i\n else:\n for i in iterable:\n yield i" }, { "identifier": "TaskProgressSpinner", "path": "quackosm/_rich_progress.py", "snippet": "class TaskProgressSpinner:\n def __init__(self, step_name: str, step_number: str):\n self.step_name = step_name\n self.step_number = step_number\n self.progress = None\n\n def __enter__(self):\n try: # pragma: no cover\n from rich.progress import Progress, SpinnerColumn, TextColumn, TimeElapsedColumn\n\n self.progress = Progress(\n SpinnerColumn(),\n TextColumn(f\"[{self.step_number: >4}/{TOTAL_STEPS}]\"),\n TextColumn(\"[progress.description]{task.description}\"),\n TextColumn(\"•\"),\n TimeElapsedColumn(),\n transient=False,\n )\n\n self.progress.__enter__()\n self.progress.add_task(description=self.step_name, total=None)\n\n except ImportError:\n self.progress = None\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n if self.progress:\n self.progress.__exit__(exc_type, exc_value, exc_tb)\n\n self.progress = None" }, { "identifier": "is_expected_type", "path": "quackosm/_typing.py", "snippet": "def is_expected_type(value: object, expected_type: Any) -> bool:\n \"\"\"\n Check if an object is a given type.\n\n Uses `typeguard` library to check objects using `typing` definitions.\n\n Args:\n value (object): Value to be checked against `expected_type`.\n expected_type (Any): A class or generic type instance.\n\n Returns:\n bool: Flag whether the object is an instance of the required type.\n \"\"\"\n result = False\n\n with suppress(TypeCheckError):\n check_type(value, expected_type)\n result = True\n\n return result" } ]
import hashlib import json import shutil import tempfile import warnings import duckdb import geoarrow.pyarrow as ga import geopandas as gpd import psutil import pyarrow as pa import pyarrow.parquet as pq import shapely.wkt as wktlib import quackosm._geo_arrow_io as io from collections.abc import Iterable from math import floor from pathlib import Path from typing import Any, Literal, NamedTuple, Optional, Union, cast from shapely.geometry.base import BaseGeometry from quackosm._constants import FEATURES_INDEX, GEOMETRY_COLUMN, WGS84_CRS from quackosm._osm_tags_filters import GroupedOsmTagsFilter, OsmTagsFilter, merge_osm_tags_filter from quackosm._osm_way_polygon_features import OsmWayPolygonConfig, parse_dict_to_config_object from quackosm._rich_progress import ( # type: ignore[attr-defined] TaskProgressBar, TaskProgressSpinner, ) from quackosm._typing import is_expected_type
2,576
""" PBF File Reader. This module contains a reader capable of parsing a PBF file into a GeoDataFrame. """ __all__ = [ "PbfFileReader", ] class PbfFileReader: """ PbfFileReader. PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader class based on DuckDB[2] and its spatial extension[3]. Handler can filter out OSM features based on tags filter and geometry filter to limit the result. References: 1. https://wiki.openstreetmap.org/wiki/PBF_Format 2. https://duckdb.org/ 3. https://github.com/duckdb/duckdb_spatial """ class ConvertedOSMParquetFiles(NamedTuple): """List of parquet files read from the `*.osm.pbf` file.""" nodes_valid_with_tags: "duckdb.DuckDBPyRelation" nodes_filtered_ids: "duckdb.DuckDBPyRelation" ways_all_with_tags: "duckdb.DuckDBPyRelation" ways_with_unnested_nodes_refs: "duckdb.DuckDBPyRelation" ways_required_ids: "duckdb.DuckDBPyRelation" ways_filtered_ids: "duckdb.DuckDBPyRelation" relations_all_with_tags: "duckdb.DuckDBPyRelation" relations_with_unnested_way_refs: "duckdb.DuckDBPyRelation" relations_filtered_ids: "duckdb.DuckDBPyRelation" class ParsedOSMFeatures(NamedTuple): """Final list of parsed features from the `*.osm.pbf` file.""" nodes: "duckdb.DuckDBPyRelation" ways: "duckdb.DuckDBPyRelation" relations: "duckdb.DuckDBPyRelation" def __init__( self, tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None, geometry_filter: Optional[BaseGeometry] = None, working_directory: Union[str, Path] = "files", osm_way_polygon_features_config: Optional[ Union[OsmWayPolygonConfig, dict[str, Any]] ] = None, ) -> None: """ Initialize PbfFileReader. Args: tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary specifying which tags to download. The keys should be OSM tags (e.g. `building`, `amenity`). The values should either be `True` for retrieving all objects with the tag, string for retrieving a single tag-value pair or list of strings for retrieving all values specified in the list. `tags={'leisure': 'park}` would return parks from the area. `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}` would return parks, all amenity types, bakeries and bicycle shops. If `None`, handler will allow all of the tags to be parsed. Defaults to `None`. geometry_filter (BaseGeometry, optional): Region which can be used to filter only intersecting OSM objects. Defaults to `None`. working_directory (Union[str, Path], optional): Directory where to save the parsed `*.parquet` files. Defaults to "files". osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional): Config used to determine which closed way features are polygons. Modifications to this config left are left for experienced OSM users. Defaults to predefined "osm_way_polygon_features.json". """ self.tags_filter = tags_filter
""" PBF File Reader. This module contains a reader capable of parsing a PBF file into a GeoDataFrame. """ __all__ = [ "PbfFileReader", ] class PbfFileReader: """ PbfFileReader. PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader class based on DuckDB[2] and its spatial extension[3]. Handler can filter out OSM features based on tags filter and geometry filter to limit the result. References: 1. https://wiki.openstreetmap.org/wiki/PBF_Format 2. https://duckdb.org/ 3. https://github.com/duckdb/duckdb_spatial """ class ConvertedOSMParquetFiles(NamedTuple): """List of parquet files read from the `*.osm.pbf` file.""" nodes_valid_with_tags: "duckdb.DuckDBPyRelation" nodes_filtered_ids: "duckdb.DuckDBPyRelation" ways_all_with_tags: "duckdb.DuckDBPyRelation" ways_with_unnested_nodes_refs: "duckdb.DuckDBPyRelation" ways_required_ids: "duckdb.DuckDBPyRelation" ways_filtered_ids: "duckdb.DuckDBPyRelation" relations_all_with_tags: "duckdb.DuckDBPyRelation" relations_with_unnested_way_refs: "duckdb.DuckDBPyRelation" relations_filtered_ids: "duckdb.DuckDBPyRelation" class ParsedOSMFeatures(NamedTuple): """Final list of parsed features from the `*.osm.pbf` file.""" nodes: "duckdb.DuckDBPyRelation" ways: "duckdb.DuckDBPyRelation" relations: "duckdb.DuckDBPyRelation" def __init__( self, tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None, geometry_filter: Optional[BaseGeometry] = None, working_directory: Union[str, Path] = "files", osm_way_polygon_features_config: Optional[ Union[OsmWayPolygonConfig, dict[str, Any]] ] = None, ) -> None: """ Initialize PbfFileReader. Args: tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary specifying which tags to download. The keys should be OSM tags (e.g. `building`, `amenity`). The values should either be `True` for retrieving all objects with the tag, string for retrieving a single tag-value pair or list of strings for retrieving all values specified in the list. `tags={'leisure': 'park}` would return parks from the area. `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}` would return parks, all amenity types, bakeries and bicycle shops. If `None`, handler will allow all of the tags to be parsed. Defaults to `None`. geometry_filter (BaseGeometry, optional): Region which can be used to filter only intersecting OSM objects. Defaults to `None`. working_directory (Union[str, Path], optional): Directory where to save the parsed `*.parquet` files. Defaults to "files". osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional): Config used to determine which closed way features are polygons. Modifications to this config left are left for experienced OSM users. Defaults to predefined "osm_way_polygon_features.json". """ self.tags_filter = tags_filter
self.merged_tags_filter = merge_osm_tags_filter(tags_filter) if tags_filter else None
3
2023-12-28 11:26:41+00:00
4k
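The context of this record declares merge_osm_tags_filter, which flattens a GroupedOsmTagsFilter into a single OsmTagsFilter. A simplified sketch of one plausible merge, following the value shapes described in the reader's docstring (True, a single string, or a list of strings); this is an illustrative guess at the semantics, not quackosm's actual implementation:

def merge_grouped_filter(grouped):
    # Union tag values across groups; a True value swallows everything else.
    merged = {}
    for group_filter in grouped.values():
        for tag, value in group_filter.items():
            if merged.get(tag) is True or value is True:
                merged[tag] = True
                continue
            values = [value] if isinstance(value, str) else list(value)
            current = merged.setdefault(tag, [])
            merged[tag] = sorted(set(current) | set(values))
    return merged

grouped = {
    "greenery": {"leisure": "park", "landuse": ["grass", "forest"]},
    "shops": {"shop": True, "leisure": "garden"},
}
print(merge_grouped_filter(grouped))
# {'leisure': ['garden', 'park'], 'landuse': ['forest', 'grass'], 'shop': True}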
KyanChen/TTP
tools/analysis_tools/visualization_cam.py
[ { "identifier": "inference_model", "path": "mmseg/apis/inference.py", "snippet": "def inference_model(model: BaseSegmentor,\n img: ImageType) -> Union[SegDataSample, SampleList]:\n \"\"\"Inference image(s) with the segmentor.\n\n Args:\n model (nn.Module): The loaded segmentor.\n imgs (str/ndarray or list[str/ndarray]): Either image files or loaded\n images.\n\n Returns:\n :obj:`SegDataSample` or list[:obj:`SegDataSample`]:\n If imgs is a list or tuple, the same length list type results\n will be returned, otherwise return the segmentation results directly.\n \"\"\"\n # prepare data\n data, is_batch = _preprare_data(img, model)\n\n # forward the model\n with torch.no_grad():\n results = model.test_step(data)\n\n return results if is_batch else results[0]" }, { "identifier": "init_model", "path": "mmseg/apis/inference.py", "snippet": "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n \"\"\"Initialize a segmentor from config file.\n\n Args:\n config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,\n :obj:`Path`, or the config object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n device (str, optional) CPU/CUDA device option. Default 'cuda:0'.\n Use 'cpu' for loading model on CPU.\n cfg_options (dict, optional): Options to override some settings in\n the used config.\n Returns:\n nn.Module: The constructed segmentor.\n \"\"\"\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n if config.model.type == 'EncoderDecoder':\n if 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n elif config.model.type == 'MultimodalEncoderDecoder':\n for k, v in config.model.items():\n if isinstance(v, dict) and 'init_cfg' in v:\n config.model[k].init_cfg = None\n config.model.pretrained = None\n config.model.train_cfg = None\n init_default_scope(config.get('default_scope', 'mmseg'))\n\n model = MODELS.build(config.model)\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n dataset_meta = checkpoint['meta'].get('dataset_meta', None)\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint.get('meta', {}):\n # mmseg 1.x\n model.dataset_meta = dataset_meta\n elif 'CLASSES' in checkpoint.get('meta', {}):\n # < mmseg 1.x\n classes = checkpoint['meta']['CLASSES']\n palette = checkpoint['meta']['PALETTE']\n model.dataset_meta = {'classes': classes, 'palette': palette}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, classes and palette will be'\n 'set according to num_classes ')\n num_classes = model.decode_head.num_classes\n dataset_name = None\n for name in dataset_aliases.keys():\n if len(get_classes(name)) == num_classes:\n dataset_name = name\n break\n if dataset_name is None:\n warnings.warn(\n 'No suitable dataset found, use Cityscapes by default')\n dataset_name = 'cityscapes'\n model.dataset_meta = {\n 'classes': get_classes(dataset_name),\n 'palette': get_palette(dataset_name)\n }\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model" }, { 
"identifier": "show_result_pyplot", "path": "mmseg/apis/inference.py", "snippet": "def show_result_pyplot(model: BaseSegmentor,\n img: Union[str, np.ndarray],\n result: SegDataSample,\n opacity: float = 0.5,\n title: str = '',\n draw_gt: bool = True,\n draw_pred: bool = True,\n wait_time: float = 0,\n show: bool = True,\n with_labels: Optional[bool] = True,\n save_dir=None,\n out_file=None):\n \"\"\"Visualize the segmentation results on the image.\n\n Args:\n model (nn.Module): The loaded segmentor.\n img (str or np.ndarray): Image filename or loaded image.\n result (SegDataSample): The prediction SegDataSample result.\n opacity(float): Opacity of painted segmentation map.\n Default 0.5. Must be in (0, 1] range.\n title (str): The title of pyplot figure.\n Default is ''.\n draw_gt (bool): Whether to draw GT SegDataSample. Default to True.\n draw_pred (bool): Whether to draw Prediction SegDataSample.\n Defaults to True.\n wait_time (float): The interval of show (s). 0 is the special value\n that means \"forever\". Defaults to 0.\n show (bool): Whether to display the drawn image.\n Default to True.\n with_labels(bool, optional): Add semantic labels in visualization\n result, Default to True.\n save_dir (str, optional): Save file dir for all storage backends.\n If it is None, the backend storage will not save any data.\n out_file (str, optional): Path to output file. Default to None.\n\n\n\n Returns:\n np.ndarray: the drawn image which channel is RGB.\n \"\"\"\n if hasattr(model, 'module'):\n model = model.module\n if isinstance(img, str):\n image = mmcv.imread(img, channel_order='rgb')\n else:\n image = img\n if save_dir is not None:\n mkdir_or_exist(save_dir)\n # init visualizer\n visualizer = SegLocalVisualizer(\n vis_backends=[dict(type='LocalVisBackend')],\n save_dir=save_dir,\n alpha=opacity)\n visualizer.dataset_meta = dict(\n classes=model.dataset_meta['classes'],\n palette=model.dataset_meta['palette'])\n visualizer.add_datasample(\n name=title,\n image=image,\n data_sample=result,\n draw_gt=draw_gt,\n draw_pred=draw_pred,\n wait_time=wait_time,\n out_file=out_file,\n show=show,\n with_labels=with_labels)\n vis_img = visualizer.get_image()\n\n return vis_img" }, { "identifier": "register_all_modules", "path": "mmseg/utils/set_env.py", "snippet": "def register_all_modules(init_default_scope: bool = True) -> None:\n \"\"\"Register all modules in mmseg into the registries.\n\n Args:\n init_default_scope (bool): Whether initialize the mmseg default scope.\n When `init_default_scope=True`, the global default scope will be\n set to `mmseg`, and all registries will build modules from mmseg's\n registry node. To understand more about the registry, please refer\n to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md\n Defaults to True.\n \"\"\" # noqa\n import mmseg.datasets # noqa: F401,F403\n import mmseg.engine # noqa: F401,F403\n import mmseg.evaluation # noqa: F401,F403\n import mmseg.models # noqa: F401,F403\n import mmseg.structures # noqa: F401,F403\n\n if init_default_scope:\n never_created = DefaultScope.get_current_instance() is None \\\n or not DefaultScope.check_instance_created('mmseg')\n if never_created:\n DefaultScope.get_instance('mmseg', scope_name='mmseg')\n return\n current_scope = DefaultScope.get_current_instance()\n if current_scope.scope_name != 'mmseg':\n warnings.warn('The current default scope '\n f'\"{current_scope.scope_name}\" is not \"mmseg\", '\n '`register_all_modules` will force the current'\n 'default scope to be \"mmseg\". 
If this is not '\n 'expected, please set `init_default_scope=False`.')\n # avoid name conflict\n new_instance_name = f'mmseg-{datetime.datetime.now()}'\n DefaultScope.get_instance(new_instance_name, scope_name='mmseg')" } ]
from argparse import ArgumentParser from mmengine import Config from mmengine.model import revert_sync_batchnorm from PIL import Image from pytorch_grad_cam import GradCAM from pytorch_grad_cam.utils.image import preprocess_image, show_cam_on_image from mmseg.apis import inference_model, init_model, show_result_pyplot from mmseg.utils import register_all_modules import numpy as np import torch import torch.nn.functional as F
2,608
# Copyright (c) OpenMMLab. All rights reserved. """Use the pytorch-grad-cam tool to visualize Class Activation Maps (CAM). requirement: pip install grad-cam """ class SemanticSegmentationTarget: """wrap the model. requirement: pip install grad-cam Args: category (int): Visualization class. mask (ndarray): Mask of class. size (tuple): Image size. """ def __init__(self, category, mask, size): self.category = category self.mask = torch.from_numpy(mask) self.size = size if torch.cuda.is_available(): self.mask = self.mask.cuda() def __call__(self, model_output): model_output = torch.unsqueeze(model_output, dim=0) model_output = F.interpolate( model_output, size=self.size, mode='bilinear') model_output = torch.squeeze(model_output, dim=0) return (model_output[self.category, :, :] * self.mask).sum() def main(): parser = ArgumentParser() parser.add_argument('img', help='Image file') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') parser.add_argument( '--out-file', default='prediction.png', help='Path to output prediction file') parser.add_argument( '--cam-file', default='vis_cam.png', help='Path to output cam file') parser.add_argument( '--target-layers', default='backbone.layer4[2]', help='Target layers to visualize CAM') parser.add_argument( '--category-index', default='7', help='Category to visualize CAM') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') args = parser.parse_args() # build the model from a config file and a checkpoint file register_all_modules() model = init_model(args.config, args.checkpoint, device=args.device) if args.device == 'cpu': model = revert_sync_batchnorm(model) # test a single image result = inference_model(model, args.img) # show the results
# Copyright (c) OpenMMLab. All rights reserved. """Use the pytorch-grad-cam tool to visualize Class Activation Maps (CAM). requirement: pip install grad-cam """ class SemanticSegmentationTarget: """wrap the model. requirement: pip install grad-cam Args: category (int): Visualization class. mask (ndarray): Mask of class. size (tuple): Image size. """ def __init__(self, category, mask, size): self.category = category self.mask = torch.from_numpy(mask) self.size = size if torch.cuda.is_available(): self.mask = self.mask.cuda() def __call__(self, model_output): model_output = torch.unsqueeze(model_output, dim=0) model_output = F.interpolate( model_output, size=self.size, mode='bilinear') model_output = torch.squeeze(model_output, dim=0) return (model_output[self.category, :, :] * self.mask).sum() def main(): parser = ArgumentParser() parser.add_argument('img', help='Image file') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') parser.add_argument( '--out-file', default='prediction.png', help='Path to output prediction file') parser.add_argument( '--cam-file', default='vis_cam.png', help='Path to output cam file') parser.add_argument( '--target-layers', default='backbone.layer4[2]', help='Target layers to visualize CAM') parser.add_argument( '--category-index', default='7', help='Category to visualize CAM') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') args = parser.parse_args() # build the model from a config file and a checkpoint file register_all_modules() model = init_model(args.config, args.checkpoint, device=args.device) if args.device == 'cpu': model = revert_sync_batchnorm(model) # test a single image result = inference_model(model, args.img) # show the results
show_result_pyplot(
2
2023-12-23 08:36:47+00:00
4k
N0rz3/Phunter
lib/cli.py
[ { "identifier": "lookup", "path": "lib/lookup.py", "snippet": "async def lookup(phone_number):\r\n print()\r\n parsed = phonenumbers.parse(phone_number)\r\n\r\n operator = carrier.name_for_number(parsed, \"fr\")\r\n line = phonenumbers.number_type(parsed)\r\n\r\n if line == phonenumbers.PhoneNumberType.FIXED_LINE:\r\n ligne = f\" [{GREEN}>{WHITE}] Line type: Fixed\"\r\n\r\n elif line == phonenumbers.PhoneNumberType.MOBILE:\r\n ligne = f\" [{GREEN}>{WHITE}] Line type: Mobile\"\r\n\r\n else:\r\n ligne = \" [-] Line not found\"\r\n\r\n possible = phonenumbers.is_possible_number(parsed)\r\n valid = phonenumbers.is_valid_number(parsed)\r\n\r\n with open(\"lib/country.json\", \"r\") as file:\r\n read = json.load(file)\r\n\r\n d = 0\r\n countrys = []\r\n\r\n for country, code in read.items():\r\n d += 1 \r\n\r\n if phone_number.startswith(code):\r\n countrys.append(country)\r\n\r\n if d == 153:\r\n break\r\n else:\r\n continue\r\n else:\r\n continue\r\n\r\n print(f\"{WHITE}📞 Phone number: {BLUE}{phone_number}{WHITE}\")\r\n\r\n if possible == True:\r\n pos = {\"possible\": \"✔️\"}\r\n else:\r\n pos = {\"possible\": \"❌\"}\r\n\r\n if valid == True:\r\n val = {\"valid\": \"✔️\"}\r\n else:\r\n val = {\"valid\": \"❌\"}\r\n\r\n print(f\" [{GREEN}>{WHITE}] Possible: {pos['possible']}\")\r\n print(f\" [{GREEN}>{WHITE}] Valid: {val['valid']}\")\r\n print()\r\n\r\n if operator != \"\":\r\n print(f\" [{GREEN}>{WHITE}] Operator: {operator}\")\r\n else:\r\n print(f\" [-] Not Operator\")\r\n try:\r\n print(f\" [{GREEN}>{WHITE}] Possible location: \" + str(countrys).replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\"))\r\n except:\r\n print(f\" [-] Not location\")\r\n\r\n print(ligne)\r\n\r\n await reputation(phone_number)\r\n\r\n await free(str(phone_number).replace(\"+\", \"\"))\r\n\r\n await spamcalls(p_n=phone_number)" }, { "identifier": "Amazon", "path": "lib/account.py", "snippet": "class Amazon:\r\n def setup_driver():\r\n browser = input(f\"[{YELLOW}?{WHITE}] What's your browser? Chrome or Firefox? 
(c/f): \")\r\n\r\n if browser.lower() == 'c':\r\n options = Options()\r\n options.add_argument('--headless')\r\n\r\n service = Service(ChromeDriverManager().install())\r\n\r\n try:\r\n driver = webdriver.Chrome(service=service, options=options)\r\n print(f\"[{GREEN}>{WHITE}] Driver setup completed\\n\")\r\n\r\n return driver\r\n\r\n except Exception:\r\n print(f\"[{RED}-{WHITE}] Error while driver setup\\n\")\r\n\r\n elif browser.lower() == 'f':\r\n options = FirefoxOptions()\r\n options.add_argument('--headless')\r\n\r\n try:\r\n driver = webdriver.Firefox(options=options)\r\n print(f\"[{GREEN}>{WHITE}] Driver setup completed\\n\")\r\n\r\n return driver\r\n\r\n except Exception:\r\n print(f\"[{RED}-{WHITE}] Error while driver setup\\n\")\r\n\r\n def amazon(p_n, output=False, file=None):\r\n if output:\r\n succes = 0\r\n\r\n driver = Amazon.setup_driver()\r\n print(f\"[{YELLOW}~{WHITE}] Phone number: {BLUE}{p_n}{WHITE}\")\r\n\r\n driver.get('https://www.amazon.com/ap/signin?openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2Fwww.amazon.com%2F%3F_encoding%3DUTF8%26ref_%3Dnav_ya_signin&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.assoc_handle=usflex&openid.mode=checkid_setup&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&')\r\n print(f\"[{YELLOW}={WHITE}] Checking...\")\r\n\r\n try:\r\n driver.find_element(By.XPATH, '//*[@id=\"ap_email\"]').send_keys(p_n)\r\n\r\n driver.find_element(By.XPATH, '//*[@id=\"continue\"]').click()\r\n except:\r\n exit(f\"[{BLUE}INFO{WHITE}] Error\")\r\n\r\n try:\r\n element = driver.find_element(By.ID, 'auth-password-missing-alert')\r\n\r\n if element:\r\n print(f\"[{GREEN}>{WHITE}] Connected to Amazon\")\r\n if output != False:\r\n with open(file, 'w') as output_file:\r\n output_file.write(f\"[>] {p_n} is connected to Amazon\")\r\n succes += 1\r\n\r\n else:\r\n print(f\"[{RED}-{WHITE}] Not connected to Amazon\")\r\n if output != False:\r\n with open(file, 'w') as output_file:\r\n output_file.write(f\"[-] {p_n} is not connected to Amazon\")\r\n succes += 1\r\n\r\n except:\r\n print(f\"[{RED}-{WHITE}] Not connected to Amazon\")\r\n if output != False:\r\n with open(file, 'w') as output_file:\r\n output_file.write(f\"[-] {p_n} is not connected to Amazon\")\r\n succes += 1\r\n\r\n driver.quit()\r\n\r\n if output:\r\n print(f\"\\n[{GREEN}>{WHITE}] ✍️ Output saved ({GREEN}{succes}{WHITE})\")\r" }, { "identifier": "Annuaire", "path": "lib/annuaire.py", "snippet": "class Annuaire:\r\n def setup_driver():\r\n browser = input(f\"[{YELLOW}?{WHITE}] What's your browser? Chrome or Firefox? 
(c/f): \")\r\n\r\n if browser.lower() == 'c':\r\n options = Options()\r\n\r\n service = Service(ChromeDriverManager().install())\r\n\r\n try:\r\n driver = webdriver.Chrome(service=service, options=options)\r\n print(f\"[{GREEN}>{WHITE}] Driver setup completed\\n\")\r\n\r\n return driver\r\n\r\n except Exception:\r\n print(f\"[{RED}-{WHITE}] Error while driver setup\\n\")\r\n\r\n elif browser.lower() == 'f':\r\n options = FirefoxOptions()\r\n\r\n try:\r\n driver = webdriver.Firefox(options=options)\r\n print(f\"[{GREEN}>{WHITE}] Driver setup completed\\n\")\r\n\r\n return driver\r\n\r\n except Exception:\r\n print(f\"[{RED}-{WHITE}] Error while driver setup\\n\")\r\n\r\n def annuaire(p_n, output=False, file=None):\r\n if output:\r\n succes = 0\r\n\r\n driver = Annuaire.setup_driver()\r\n\r\n driver.get('https://www.pagesjaunes.fr/annuaireinverse')\r\n print(f\"[{RED}!{WHITE}] You have 10s for accept\")\r\n sleep(10)\r\n\r\n driver.find_element(By.XPATH, '//*[@id=\"quoiqui\"]').send_keys(p_n)\r\n sleep(1)\r\n\r\n driver.find_element(By.XPATH, '//*[@id=\"form_motor_pagesjaunes\"]/div/div[2]/div/button').click()\r\n sleep(1)\r\n\r\n try:\r\n pn = p_n.replace(\"+\", \"\")\r\n\r\n result = driver.find_element(By.XPATH, '//*[@id=\"SEL-nbresultat\"]')\r\n\r\n print(f\"\\n[{GREEN}>{WHITE}] {result.text} result{'s' if int(result.text.strip()) > 1 else ''} found in PageBlanche\")\r\n print(f\"[{YELLOW}={WHITE}] Link => {BLUE}https://www.pagesjaunes.fr/annuaireinverse/recherche?quoiqui={pn}&univers=annuaireinverse&idOu={WHITE}\")\r\n\r\n if output != False:\r\n with open(file, 'w') as output_file:\r\n output_file.write(f\"[>] {result.text} result{'s' if int(result.text.strip()) > 1 else ''} found in PageBlanche\\n[=] Link => https://www.pagesjaunes.fr/annuaireinverse/recherche?quoiqui={pn}&univers=annuaireinverse&idOu=\")\r\n succes += 1\r\n\r\n except:\r\n print(f\"\\n[{RED}-{WHITE}] Not result found in PageBlanche\")\r\n if output != False:\r\n with open(file, 'w') as output_file:\r\n output_file.write(\"[-] Not result found in PageBlanche\")\r\n succes += 1\r\n \r\n driver.quit()\r\n\r\n if output:\r\n print(f\"\\n[{GREEN}>{WHITE}] ✍️ Output saved ({GREEN}{succes}{WHITE})\")\r" } ]
import argparse import time from .lookup import lookup from .account import Amazon from .annuaire import Annuaire from .text import * from .verify import *
2,432
async def parser(): parse = argparse.ArgumentParser() parse.add_argument( '-t', '--target', nargs='?', type=str, default=None, help='get info by phone number' ) parse.add_argument( '-a', '--amazon', nargs='?', type=str, default=None, help='check whether an Amazon account is linked to the phone number' ) parse.add_argument( '-p', '--person', nargs='?', type=str, default=None, help='get owner of phone number via reverse directory lookup (Page Blanche)' ) parse.add_argument( '-f', '--file', nargs='?', type=str, default=None, help='get info from a file containing phone numbers' ) parse.add_argument( '-v', '--verify', action='store_true', help='check your version, update(s), services...' ) parse.add_argument( '-o', '--output', nargs='?', type=str, default=None, help='give a file to save the output (only with args: --amazon/-a , --person/-p)' ) args = parse.parse_args() if args.file: start = time.time() with open(args.file, 'r') as file: if args.file.endswith(".txt"): nums = file.read().split('\n') checked = 0 for num in nums:
async def parser(): parse = argparse.ArgumentParser() parse.add_argument( '-t', '--target', nargs='?', type=str, default=None, help='get info by phone number' ) parse.add_argument( '-a', '--amazon', nargs='?', type=str, default=None, help='check whether an Amazon account is linked to the phone number' ) parse.add_argument( '-p', '--person', nargs='?', type=str, default=None, help='get owner of phone number via reverse directory lookup (Page Blanche)' ) parse.add_argument( '-f', '--file', nargs='?', type=str, default=None, help='get info from a file containing phone numbers' ) parse.add_argument( '-v', '--verify', action='store_true', help='check your version, update(s), services...' ) parse.add_argument( '-o', '--output', nargs='?', type=str, default=None, help='give a file to save the output (only with args: --amazon/-a , --person/-p)' ) args = parse.parse_args() if args.file: start = time.time() with open(args.file, 'r') as file: if args.file.endswith(".txt"): nums = file.read().split('\n') checked = 0 for num in nums:
await lookup(num)
0
2023-12-30 13:21:14+00:00
4k
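The lookup coroutine in this record leans on the phonenumbers package for its parse, possible/valid, carrier and line-type checks. A small synchronous sketch of the same calls (the number below is a made-up example, not real data):

import phonenumbers
from phonenumbers import carrier

parsed = phonenumbers.parse("+33612345678")
print(phonenumbers.is_possible_number(parsed))   # cheap length/prefix check
print(phonenumbers.is_valid_number(parsed))      # full pattern validation
print(carrier.name_for_number(parsed, "fr"))     # operator name, may be ""
line = phonenumbers.number_type(parsed)
print(line == phonenumbers.PhoneNumberType.MOBILE)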
vpetersson/podcast-rss-generator
tests/test_rss_generator.py
[ { "identifier": "convert_iso_to_rfc2822", "path": "rss_generator.py", "snippet": "def convert_iso_to_rfc2822(iso_date):\n date_obj = datetime.fromisoformat(iso_date)\n return format_datetime(date_obj)" }, { "identifier": "generate_rss", "path": "rss_generator.py", "snippet": "def generate_rss(config, output_file_path):\n ET.register_namespace(\"itunes\", \"http://www.itunes.com/dtds/podcast-1.0.dtd\")\n ET.register_namespace(\"atom\", \"http://www.w3.org/2005/Atom\")\n\n # Global itunes:explicit setting\n global_explicit = (\n \"yes\" if config[\"metadata\"].get(\"itunes_explicit\", False) else \"no\"\n )\n\n rss = ET.Element(\n \"rss\",\n version=\"2.0\",\n attrib={\n \"xmlns:itunes\": \"http://www.itunes.com/dtds/podcast-1.0.dtd\",\n \"xmlns:atom\": \"http://www.w3.org/2005/Atom\",\n },\n )\n # Metadata\n channel = ET.SubElement(rss, \"channel\")\n metadata = config[\"metadata\"]\n ET.SubElement(channel, \"title\").text = metadata[\"title\"]\n ET.SubElement(channel, \"description\").text = format_description(\n metadata[\"description\"]\n )\n ET.SubElement(channel, \"language\").text = metadata.get(\"language\", \"en-us\")\n ET.SubElement(channel, \"link\").text = metadata[\"link\"]\n ET.SubElement(\n channel, \"generator\"\n ).text = (\n \"Podcast RSS Generator (https://github.com/vpetersson/podcast-rss-generator)\"\n )\n ET.SubElement(\n channel,\n \"atom:link\",\n href=metadata[\"rss_feed_url\"],\n rel=\"self\",\n type=\"application/rss+xml\",\n )\n\n # Adds explicit tag\n itunes_explicit = ET.SubElement(channel, \"itunes:explicit\")\n itunes_explicit.text = global_explicit\n\n # Add itunes:owner and itunes:email tags\n itunes_owner = ET.SubElement(channel, \"itunes:owner\")\n ET.SubElement(itunes_owner, \"itunes:email\").text = metadata[\"itunes_email\"]\n\n # Add itunes:author tag\n itunes_author = ET.SubElement(channel, \"itunes:author\")\n itunes_author.text = metadata[\"itunes_author\"]\n\n # Duplicate description to itunes summary\n itunes_summary = ET.SubElement(channel, \"itunes:summary\")\n itunes_summary.text = metadata[\"description\"]\n\n # Add itunes:category tag\n if \"itunes_category\" in metadata:\n ET.SubElement(channel, \"itunes:category\", text=metadata[\"itunes_category\"])\n\n if \"itunes_image\" in metadata:\n itunes_image = ET.SubElement(channel, \"itunes:image\")\n itunes_image.set(\"href\", metadata[\"itunes_image\"])\n\n # Episodes\n for episode in config[\"episodes\"]:\n print(f\"Processing episode {episode['title']}...\")\n\n # Don't pre-publish episodes\n if not datetime.fromisoformat(episode[\"publication_date\"]) < datetime.utcnow():\n print(\n f\"Skipping episode {episode['title']} as it's not scheduled to be released until {episode['publication_date']}.\"\n )\n continue\n\n file_info = get_file_info(episode[\"asset_url\"])\n item = ET.SubElement(channel, \"item\")\n ET.SubElement(item, \"pubDate\").text = convert_iso_to_rfc2822(\n episode[\"publication_date\"]\n )\n ET.SubElement(item, \"title\").text = episode[\"title\"]\n ET.SubElement(item, \"description\").text = format_description(\n episode[\"description\"]\n )\n ET.SubElement(item, \"guid\").text = episode[\"asset_url\"]\n ET.SubElement(\n item,\n \"enclosure\",\n url=episode[\"asset_url\"],\n type=file_info[\"content-type\"],\n length=str(file_info[\"content-length\"]),\n )\n\n # Apply global itunes:explicit setting to each episode\n itunes_explicit = ET.SubElement(item, \"itunes:explicit\")\n itunes_explicit.text = global_explicit\n\n # Add itunes:duration tag\n itunes_duration = 
ET.SubElement(item, \"itunes:duration\")\n itunes_duration.text = str(file_info[\"duration\"])\n\n # iTunes-specific tags\n if episode.get(\"episode\") is not None:\n itunes_episode = ET.SubElement(item, \"itunes:episode\")\n itunes_episode.text = str(episode[\"episode\"])\n\n if episode.get(\"season\") is not None:\n itunes_season = ET.SubElement(item, \"itunes:season\")\n itunes_season.text = str(episode[\"season\"])\n\n if episode.get(\"episode_type\") is not None:\n itunes_episode_type = ET.SubElement(item, \"itunes:episodeType\")\n itunes_episode_type.text = episode[\"episode_type\"]\n\n # Add link if available, if not, use global\n link = ET.SubElement(item, \"link\")\n link.text = episode.get(\"link\", metadata[\"link\"])\n\n # Use episode specific artwork if available\n itunes_image_url = episode.get(\"itunes_image\", metadata[\"itunes_image\"])\n\n # Creating the 'itunes:image' element with the determined URL\n itunes_image = ET.SubElement(item, \"itunes:image\")\n itunes_image.set(\"href\", itunes_image_url)\n\n tree = ET.ElementTree(rss)\n tree.write(output_file_path, encoding=\"UTF-8\", xml_declaration=True)" }, { "identifier": "get_file_info", "path": "rss_generator.py", "snippet": "def get_file_info(url):\n response = requests.head(url, allow_redirects=True)\n\n # Get duration of audio/video file\n # We're using the response.url here in order to\n # follow redirects and get the actual file\n\n probe = ffprobe(\n \"-hide_banner\",\n \"-v\",\n \"quiet\",\n \"-show_streams\",\n \"-print_format\",\n \"flat\",\n response.url,\n )\n lines = probe.split(\"\\n\")\n\n # Filtering out the line that contains 'streams.stream.0.duration'\n duration_line = next(\n (line for line in lines if line.startswith(\"streams.stream.0.duration=\")), None\n )\n\n if duration_line:\n # Extracting the numeric value and converting it to an integer\n duration = int(float(duration_line.split(\"=\")[1].strip('\"')))\n else:\n duration = None\n\n return {\n \"content-length\": response.headers.get(\"content-length\"),\n \"content-type\": response.headers.get(\"content-type\"),\n \"duration\": duration,\n }" }, { "identifier": "read_podcast_config", "path": "rss_generator.py", "snippet": "def read_podcast_config(yaml_file_path):\n with open(yaml_file_path, \"r\", encoding=\"utf-8\") as file:\n return yaml.safe_load(file)" } ]
import os import unittest from xml.etree import ElementTree as ET from rss_generator import (convert_iso_to_rfc2822, generate_rss, get_file_info, read_podcast_config)
1682
CONFIG_FILE = "podcast_config.example.yaml" class TestRSSGenerator(unittest.TestCase): @classmethod def setUpClass(cls): # Read the configuration and generate the RSS feed once for all tests
CONFIG_FILE = "podcast_config.example.yaml" class TestRSSGenerator(unittest.TestCase): @classmethod def setUpClass(cls): # Read the configuration and generate the RSS feed once for all tests
cls.config = read_podcast_config(CONFIG_FILE)
3
2023-12-23 09:47:39+00:00
4k
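The record above centers on rss_generator.py, whose convert_iso_to_rfc2822 helper bridges the ISO 8601 dates stored in the YAML config and the RFC 2822 dates that RSS <pubDate> elements require. A minimal standalone sketch of that conversion, with a hypothetical example date:

```python
# Minimal sketch of the ISO-8601 -> RFC 2822 conversion used for <pubDate>,
# mirroring convert_iso_to_rfc2822 from the rss_generator.py snippet above.
from datetime import datetime
from email.utils import format_datetime

def convert_iso_to_rfc2822(iso_date: str) -> str:
    # fromisoformat parses offsets written as "+00:00" (Python 3.7+)
    return format_datetime(datetime.fromisoformat(iso_date))

# Hypothetical example date, not taken from the repo's config:
print(convert_iso_to_rfc2822("2023-12-23T09:47:39+00:00"))
# -> "Sat, 23 Dec 2023 09:47:39 +0000"
```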
SkierProjects/MultiLabelImageClassificationPytorch
src/utils/dataset/image_dataset.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "class config:\n \"\"\"\n Configuration class for holding model and training parameters.\n \"\"\"\n\n # Default static property values\n model_name = 'regnet_y_16gf'\n model_requires_grad = True\n num_classes = 31\n model_dropout_prob = 10\n model_weights = 'IMAGENET1K_SWAG_E2E_V1'\n image_size = 384\n batch_size = 18\n learning_rate = 1e-4\n num_epochs = 50\n continue_training = True\n model_name_to_load = \"best_model\"\n early_stopping_patience = 8\n early_stopping_threshold = 8e-3\n learningrate_reducer_patience = 3\n learningrate_reducer_threshold = 2e-2\n learningrate_reducer_factor = 0.1\n learningrate_reducer_min_lr = 1e-7\n\n embedding_layer_enabled = False\n embedding_layer_dimension = 64\n gcn_enabled = True\n gcn_model_name = \"GAT\"\n # Define the edges\n gcn_edge_index = torch.tensor(\n [[18, 19, 13, 19, 26, 19, 2, 3, 14, 19, 1, 19, 29, 19, 8, 19, 17, 26,\n 21, 19, 6, 19, 2, 19, 2, 1, 15, 19, 5, 19, 7, 19, 2, 21, 25, 19,\n 25, 8, 16, 19, 10, 14, 8, 6, 15, 8, 14, 8, 22, 23, 6, 14, 8, 13,\n 16, 0, 8, 16, 8, 5, 7, 18, 5, 15, 23, 28, 2, 0, 11, 19, 14, 1,\n 5, 16, 14, 29, 17, 19, 23, 11, 6, 7, 22, 0, 22, 19, 30, 19, 28, 19,\n 22, 30, 5, 0, 21, 0, 28, 28, 28, 10, 19, 16, 9, 5, 24, 21, 24, 30,\n 28, 21, 18, 0, 19, 11, 3, 3, 3, 5],\n [19, 18, 19, 13, 19, 26, 3, 2, 19, 14, 19, 1, 19, 29, 19, 8, 26, 17,\n 19, 21, 19, 6, 19, 2, 1, 2, 19, 15, 19, 5, 19, 7, 21, 2, 19, 25,\n 8, 25, 19, 16, 14, 10, 6, 8, 8, 15, 8, 14, 23, 22, 14, 6, 13, 8,\n 0, 16, 16, 8, 5, 8, 18, 7, 15, 5, 28, 23, 0, 2, 19, 11, 1, 14,\n 16, 5, 29, 14, 19, 17, 11, 23, 7, 6, 0, 22, 19, 22, 19, 30, 19, 28,\n 30, 22, 0, 5, 0, 21, 11, 10, 29, 19, 10, 9, 16, 24, 5, 24, 21, 28,\n 30, 18, 21, 19, 0, 12, 1, 11, 10, 9]])\n gcn_edge_weights = torch.tensor(\n [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,\n 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,\n 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,\n 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,\n 1.0000, 1.0000, 1.0000, 1.0000, 0.5000, 0.5000, 0.5000, 0.5000,\n 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000,\n 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000,\n 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000,\n 0.5000, 0.5000, 0.5000, 0.5000, 0.8000, 0.8000, 0.8000, 0.8000,\n 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000,\n 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000,\n 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000, 0.8000,\n 1.0000, 1.0000, 1.0000, -0.5000, -0.5000, -0.5000, -0.5000, -0.5000,\n -0.5000, -0.5000, -0.5000, -0.5000, -0.5000, -0.5000, -0.5000, -0.5000,\n -0.5000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000])\n\n log_level = \"DEBUG\"\n\n dataset_file_name = \"dataset.csv\"\n\n model_to_load_raw_weights = \"regnet_y_16gf_384_0.7273\"\n\n store_gradients_epoch_interval = 5\n\n check_test_loss_epoch_interval = 10\n\n dataset_normalization_mean = [0.4805, 0.3967, 0.3589]\n dataset_normalization_std = [0.3207, 0.2930, 0.2824]\n train_percentage = 80\n valid_percentage = 10\n test_percentage = 10\n\n def __init__(self, config_path=None):\n \"\"\"\n Initialize a new Config instance, optionally loading values from a JSON file.\n\n Parameters:\n - config_path: str (optional), path to a JSON file with configuration values.\n \"\"\"\n if config_path:\n self.load_from_json(config_path)\n\n @classmethod\n def load_from_json(cls, config_data):\n \"\"\"\n Load 
configuration data from a dictionary, typically loaded from a JSON file,\n and update the configuration instance.\n\n Parameters:\n - config_data: dict, dictionary containing configuration keys and values.\n\n Returns:\n - Config instance with updated values.\n \"\"\"\n new_instance = cls() # Create a new instance with default values\n for key, value in config_data.items():\n normalized_key = key.lower() # Normalize the key to lowercase\n if hasattr(new_instance, normalized_key):\n setattr(new_instance, normalized_key, value)\n return new_instance" }, { "identifier": "LoggerFactory", "path": "src/utils/logging/loggerfactory.py", "snippet": "class LoggerFactory:\n DEFAULT_LOG_LEVEL = logging.INFO\n LOG_FILE_MAX_BYTES = 10 * 1024 * 1024 # 10 MB\n LOG_FILE_BACKUP_COUNT = 5 # Keep 5 backup files\n LONG_LOG_FORMAT = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n SHORT_LOG_FORMAT = \"%(levelname)s: %(message)s\"\n DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n @staticmethod\n def setup_logging(loggername, log_file=None, level=None, config=config):\n \"\"\"\n Set up logging configuration for a logger with the specified name.\n\n Parameters:\n logger_name (str): The name of the logger to set up.\n log_file (str): The path to the log file. If None, logs to stdout.\n level (int): The logging level. If None, defaults to the level specified in config.\n config (module): The configuration module with a 'log_level' attribute.\n\n Returns:\n logging.Logger: Configured logger instance.\n \"\"\"\n if level is None:\n level = getattr(logging, config.log_level, LoggerFactory.DEFAULT_LOG_LEVEL)\n \n # Since we are setting up handlers individually, we don't use basicConfig\n logger = logging.getLogger(loggername)\n logger.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter(LoggerFactory.SHORT_LOG_FORMAT))\n logger.addHandler(console_handler)\n\n if log_file is not None:\n os.makedirs(os.path.dirname(log_file), exist_ok=True)\n file_handler = logging.handlers.RotatingFileHandler(\n log_file, maxBytes=LoggerFactory.LOG_FILE_MAX_BYTES, backupCount=LoggerFactory.LOG_FILE_BACKUP_COUNT)\n file_handler.setFormatter(logging.Formatter(LoggerFactory.LONG_LOG_FORMAT, LoggerFactory.DATE_FORMAT))\n logger.addHandler(file_handler)\n\n return logger\n\n @staticmethod\n def get_logger(name):\n \"\"\"\n Get a logger with the specified name.\n\n Parameters:\n name (str): The name of the logger to retrieve.\n\n Returns:\n logging.Logger: The logger instance with the given name.\n \"\"\"\n return logging.getLogger(name)" } ]
import torch import hashlib import cv2 import numpy as np import torchvision.transforms as transforms import pandas as pd from torch.utils.data import Dataset from sklearn.model_selection import train_test_split from src.config import config from src.utils.logging.loggerfactory import LoggerFactory
2946
logger = LoggerFactory.get_logger(f"logger.{__name__}") class ImageDataset(Dataset): """ A dataset class for loading and transforming images for model training and evaluation. """
logger = LoggerFactory.get_logger(f"logger.{__name__}") class ImageDataset(Dataset): """ A dataset class for loading and transforming images for model training and evaluation. """
def __init__(self, csv, mode, random_state=42, config=config):
0
2023-12-25 18:45:52+00:00
4k
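The config class in the record above applies JSON overrides only to attributes that already exist on the instance, after lowercasing the keys, so unknown or misspelled keys silently keep the class defaults. A minimal sketch of that override pattern; the attribute values and the sample dict here are illustrative, not from the repo:

```python
# Minimal sketch of the config-override pattern from src/config.py above:
# only keys already present on the instance (case-insensitively) are applied.
class Config:
    image_size = 384
    batch_size = 18
    learning_rate = 1e-4

    @classmethod
    def load_from_json(cls, config_data: dict) -> "Config":
        instance = cls()  # start from the class defaults
        for key, value in config_data.items():
            normalized = key.lower()
            if hasattr(instance, normalized):
                setattr(instance, normalized, value)
        return instance

cfg = Config.load_from_json({"Batch_Size": 8, "unknown_key": 1})
print(cfg.batch_size, cfg.image_size)  # 8 384 -- unknown_key was ignored
```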
the-seeds/imitater
src/imitater/service/app.py
[ { "identifier": "ChatModel", "path": "src/imitater/model/chat_model.py", "snippet": "class ChatModel:\n def __init__(self) -> None:\n if int(os.environ.get(\"ENABLE_ATTN_BIAS\")):\n llama_attn_bias_monkey_patch()\n\n engine_args = AsyncEngineArgs(model=os.environ.get(\"CHAT_MODEL_PATH\"))\n\n if os.environ.get(\"CHAT_MODEL_DEVICE\"):\n engine_args.tensor_parallel_size = len(os.environ.get(\"CHAT_MODEL_DEVICE\").split(\",\"))\n\n self._engine = AsyncLLMEngine.from_engine_args(engine_args)\n\n self._tokenizer: \"PreTrainedTokenizerBase\" = AutoTokenizer.from_pretrained(\n pretrained_model_name_or_path=os.environ.get(\"CHAT_MODEL_PATH\")\n )\n self._load_generation_config()\n\n def _load_generation_config(self):\n try:\n self._generation_config = GenerationConfig.from_pretrained(\n pretrained_model_name=os.environ.get(\"CHAT_MODEL_PATH\")\n )\n except Exception:\n self._generation_config = GenerationConfig(\n pad_token_id=self._tokenizer.pad_token_id,\n bos_token_id=self._tokenizer.bos_token_id,\n eos_token_id=self._tokenizer.eos_token_id,\n )\n\n if not self._generation_config.temperature:\n self._generation_config.temperature = 1.0\n\n if not self._generation_config.top_p:\n self._generation_config.top_p = 1.0\n\n if not self._generation_config.max_new_tokens:\n self._generation_config.max_new_tokens = 1024\n\n if isinstance(self._generation_config.eos_token_id, int):\n self._generation_config.eos_token_id = [self._generation_config.eos_token_id]\n\n async def _generate(\n self, messages: List[Dict[str, str]], request_id: str, **gen_kwargs\n ) -> AsyncIterator[\"RequestOutput\"]:\n input_ids = self._tokenizer.apply_chat_template(\n conversation=messages, tokenize=True, add_generation_prompt=True\n )\n sampline_params = SamplingParams(\n temperature=gen_kwargs.get(\"temperature\", None) or self._generation_config.temperature,\n top_p=gen_kwargs.get(\"top_p\", None) or self._generation_config.top_p,\n max_tokens=gen_kwargs.get(\"max_tokens\", None) or self._generation_config.max_new_tokens,\n stop_token_ids=self._generation_config.eos_token_id,\n )\n result_generator = self._engine.generate(\n prompt=None, sampling_params=sampline_params, request_id=request_id, prompt_token_ids=input_ids\n )\n return result_generator\n\n async def chat(self, messages: List[Dict[str, str]], request_id: str, **gen_kwargs) -> str:\n generator = await self._generate(messages, request_id, **gen_kwargs)\n prev_text = \"\"\n async for result in generator:\n prev_text = result.outputs[0].text\n return prev_text\n\n async def stream_chat(\n self, messages: List[Dict[str, str]], request_id: str, **gen_kwargs\n ) -> Generator[str, None, None]:\n generator = await self._generate(messages, request_id, **gen_kwargs)\n prev_text = \"\"\n async for result in generator:\n delta_text = result.outputs[0].text[len(prev_text) :]\n prev_text = result.outputs[0].text\n yield delta_text" }, { "identifier": "EmbedModel", "path": "src/imitater/model/embed_model.py", "snippet": "class EmbedModel:\n def __init__(self, max_tasks: Optional[int] = 5) -> None:\n self._semaphore = asyncio.Semaphore(max_tasks)\n self._batch_size = int(os.environ.get(\"EMBED_BATCH_SIZE\"))\n self._model: \"PreTrainedModel\" = AutoModel.from_pretrained(\n pretrained_model_name_or_path=os.environ.get(\"EMBED_MODEL_PATH\"),\n device_map={\"\": int(os.environ.get(\"EMBED_MODEL_DEVICE\"))},\n torch_dtype=torch.float16,\n )\n self._model.eval()\n self._tokenizer: \"PreTrainedTokenizerBase\" = AutoTokenizer.from_pretrained(\n 
pretrained_model_name_or_path=os.environ.get(\"EMBED_MODEL_PATH\")\n )\n self._tokenizer.padding_side = \"right\"\n\n async def _run_task(self, batch_encoding: \"BatchEncoding\") -> List[List[float]]:\n async with self._semaphore:\n loop = asyncio.get_running_loop()\n return await loop.run_in_executor(None, _get_embeddings, self._model, batch_encoding)\n\n async def __call__(self, texts: List[str]) -> List[List[float]]:\n results = []\n for i in range(0, len(texts), self._batch_size):\n batch_encoding = self._tokenizer(\n texts[i : i + self._batch_size], padding=True, truncation=True, return_tensors=\"pt\"\n )\n embeddings = await self._run_task(batch_encoding)\n results.extend(embeddings)\n\n return results" }, { "identifier": "dictify", "path": "src/imitater/utils/generic.py", "snippet": "def dictify(data: \"BaseModel\") -> Dict[str, Any]:\n try: # pydantic v2\n return data.model_dump(exclude_unset=True)\n except Exception: # pydantic v1\n return data.dict(exclude_unset=True)" }, { "identifier": "jsonify", "path": "src/imitater/utils/generic.py", "snippet": "def jsonify(data: \"BaseModel\") -> str:\n try: # pydantic v2\n return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False)\n except Exception: # pydantic v1\n return data.json(exclude_unset=True, ensure_ascii=False)" }, { "identifier": "torch_gc", "path": "src/imitater/utils/generic.py", "snippet": "def torch_gc() -> None:\n r\"\"\"\n Collects GPU memory.\n \"\"\"\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n torch.cuda.ipc_collect()" }, { "identifier": "ChatCompletionRequest", "path": "src/imitater/service/protocol.py", "snippet": "class ChatCompletionRequest(BaseModel):\n model: str\n messages: List[ChatMessage]\n temperature: Optional[float] = None\n top_p: Optional[float] = None\n n: Optional[int] = 1\n max_tokens: Optional[int] = None\n stream: Optional[bool] = False" }, { "identifier": "ChatCompletionResponse", "path": "src/imitater/service/protocol.py", "snippet": "class ChatCompletionResponse(BaseModel):\n id: str\n object: Optional[str] = \"chat.completion\"\n created: Optional[int] = Field(default_factory=lambda: int(time.time()))\n model: str\n choices: List[ChatCompletionResponseChoice]\n usage: UsageInfo" }, { "identifier": "ChatCompletionResponseChoice", "path": "src/imitater/service/protocol.py", "snippet": "class ChatCompletionResponseChoice(BaseModel):\n index: int\n message: ChatMessage\n finish_reason: Finish" }, { "identifier": "ChatCompletionStreamResponse", "path": "src/imitater/service/protocol.py", "snippet": "class ChatCompletionStreamResponse(BaseModel):\n id: str\n object: Optional[str] = \"chat.completion.chunk\"\n created: Optional[int] = Field(default_factory=lambda: int(time.time()))\n model: str\n choices: List[ChatCompletionStreamResponseChoice]" }, { "identifier": "ChatCompletionStreamResponseChoice", "path": "src/imitater/service/protocol.py", "snippet": "class ChatCompletionStreamResponseChoice(BaseModel):\n index: int\n delta: DeltaMessage\n finish_reason: Optional[Finish] = None" }, { "identifier": "ChatMessage", "path": "src/imitater/service/protocol.py", "snippet": "class ChatMessage(BaseModel):\n role: Role\n content: str" }, { "identifier": "DeltaMessage", "path": "src/imitater/service/protocol.py", "snippet": "class DeltaMessage(BaseModel):\n role: Optional[Role] = None\n content: Optional[str] = None" }, { "identifier": "Embeddings", "path": "src/imitater/service/protocol.py", "snippet": "class Embeddings(BaseModel):\n object: Optional[str] = \"embedding\"\n 
embedding: List[float]\n index: int" }, { "identifier": "EmbeddingsRequest", "path": "src/imitater/service/protocol.py", "snippet": "class EmbeddingsRequest(BaseModel):\n input: Union[str, List[str]]\n model: str\n encoding_format: Optional[str] = \"float\"" }, { "identifier": "EmbeddingsResponse", "path": "src/imitater/service/protocol.py", "snippet": "class EmbeddingsResponse(BaseModel):\n object: Optional[str] = \"list\"\n data: List[Embeddings]\n model: str\n usage: UsageInfo" }, { "identifier": "Finish", "path": "src/imitater/service/protocol.py", "snippet": "class Finish(str, Enum):\n STOP = \"stop\"\n LENGTH = \"length\"" }, { "identifier": "ModelCard", "path": "src/imitater/service/protocol.py", "snippet": "class ModelCard(BaseModel):\n id: str\n object: Optional[str] = \"model\"\n created: Optional[int] = Field(default_factory=lambda: int(time.time()))\n owned_by: Optional[str] = \"owner\"" }, { "identifier": "ModelList", "path": "src/imitater/service/protocol.py", "snippet": "class ModelList(BaseModel):\n object: Optional[str] = \"list\"\n data: Optional[List[ModelCard]] = []" }, { "identifier": "Role", "path": "src/imitater/service/protocol.py", "snippet": "class Role(str, Enum):\n USER = \"user\"\n ASSISTANT = \"assistant\"\n SYSTEM = \"system\"" }, { "identifier": "UsageInfo", "path": "src/imitater/service/protocol.py", "snippet": "class UsageInfo(BaseModel):\n prompt_tokens: int\n completion_tokens: Optional[int] = None\n total_tokens: int" } ]
import os import uuid import uvicorn from contextlib import asynccontextmanager from typing import Any, Dict from fastapi import FastAPI, status from fastapi.middleware.cors import CORSMiddleware from sse_starlette import EventSourceResponse from ..model.chat_model import ChatModel from ..model.embed_model import EmbedModel from ..utils.generic import dictify, jsonify, torch_gc from .protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseChoice, ChatCompletionStreamResponse, ChatCompletionStreamResponseChoice, ChatMessage, DeltaMessage, Embeddings, EmbeddingsRequest, EmbeddingsResponse, Finish, ModelCard, ModelList, Role, UsageInfo, )
2869
@asynccontextmanager async def lifespan(app: "FastAPI") -> None: yield torch_gc() def launch_app() -> None: app = FastAPI(lifespan=lifespan) chat_model = ChatModel() embed_model = EmbedModel() app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"] ) @app.get("/v1/models", response_model=ModelList) async def list_models(): model_card = ModelCard(id="gpt-3.5-turbo") return ModelList(data=[model_card]) @app.post("/v1/embeddings", response_model=EmbeddingsResponse, status_code=status.HTTP_200_OK) async def create_embeddings(request: EmbeddingsRequest): texts = request.input if isinstance(texts, str): texts = [texts] embed_output = await embed_model(texts) embeddings = [] for i in range(len(embed_output)): embeddings.append(Embeddings(embedding=embed_output[i], index=i)) return EmbeddingsResponse( data=embeddings, model=request.model, usage=UsageInfo(prompt_tokens=0, completion_tokens=None, total_tokens=0), ) @app.post("/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK) async def create_chat_completion(request: ChatCompletionRequest): input_kwargs = { "messages": [dictify(message) for message in request.messages], "request_id": "chatcmpl-{}".format(uuid.uuid4().hex), "temperature": request.temperature, "top_p": request.top_p, "max_tokens": request.max_tokens, } if request.stream: generator = create_stream_chat_completion(request, input_kwargs) return EventSourceResponse(generator, media_type="text/event-stream") response = await chat_model.chat(**input_kwargs) choice = ChatCompletionResponseChoice( index=0, message=ChatMessage(role=Role.ASSISTANT, content=response), finish_reason=Finish.STOP ) return ChatCompletionResponse( id=input_kwargs["request_id"], model=request.model, choices=[choice], usage=UsageInfo(prompt_tokens=0, completion_tokens=0, total_tokens=0), ) async def create_stream_chat_completion(request: ChatCompletionRequest, input_kwargs: Dict[str, Any]): choice = ChatCompletionStreamResponseChoice( index=0, delta=DeltaMessage(role=Role.ASSISTANT, content=""), finish_reason=None ) chunk = ChatCompletionStreamResponse(id=input_kwargs["request_id"], model=request.model, choices=[choice])
@asynccontextmanager async def lifespan(app: "FastAPI") -> None: yield torch_gc() def launch_app() -> None: app = FastAPI(lifespan=lifespan) chat_model = ChatModel() embed_model = EmbedModel() app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"] ) @app.get("/v1/models", response_model=ModelList) async def list_models(): model_card = ModelCard(id="gpt-3.5-turbo") return ModelList(data=[model_card]) @app.post("/v1/embeddings", response_model=EmbeddingsResponse, status_code=status.HTTP_200_OK) async def create_embeddings(request: EmbeddingsRequest): texts = request.input if isinstance(texts, str): texts = [texts] embed_output = await embed_model(texts) embeddings = [] for i in range(len(embed_output)): embeddings.append(Embeddings(embedding=embed_output[i], index=i)) return EmbeddingsResponse( data=embeddings, model=request.model, usage=UsageInfo(prompt_tokens=0, completion_tokens=None, total_tokens=0), ) @app.post("/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK) async def create_chat_completion(request: ChatCompletionRequest): input_kwargs = { "messages": [dictify(message) for message in request.messages], "request_id": "chatcmpl-{}".format(uuid.uuid4().hex), "temperature": request.temperature, "top_p": request.top_p, "max_tokens": request.max_tokens, } if request.stream: generator = create_stream_chat_completion(request, input_kwargs) return EventSourceResponse(generator, media_type="text/event-stream") response = await chat_model.chat(**input_kwargs) choice = ChatCompletionResponseChoice( index=0, message=ChatMessage(role=Role.ASSISTANT, content=response), finish_reason=Finish.STOP ) return ChatCompletionResponse( id=input_kwargs["request_id"], model=request.model, choices=[choice], usage=UsageInfo(prompt_tokens=0, completion_tokens=0, total_tokens=0), ) async def create_stream_chat_completion(request: ChatCompletionRequest, input_kwargs: Dict[str, Any]): choice = ChatCompletionStreamResponseChoice( index=0, delta=DeltaMessage(role=Role.ASSISTANT, content=""), finish_reason=None ) chunk = ChatCompletionStreamResponse(id=input_kwargs["request_id"], model=request.model, choices=[choice])
yield jsonify(chunk)
3
2023-12-31 07:21:06+00:00
4k
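The stream_chat method in the ChatModel snippet above turns vLLM's cumulative outputs into SSE deltas by slicing off the previously seen prefix. A minimal sketch of just that delta computation; the fake_outputs list is a made-up stand-in for successive RequestOutput texts:

```python
# Minimal sketch of the delta logic in ChatModel.stream_chat above: each vLLM
# RequestOutput carries the full text so far, so the stream yields only the
# newly generated suffix.
def iter_deltas(full_texts):
    prev = ""
    for text in full_texts:
        delta = text[len(prev):]  # suffix not yet sent to the client
        prev = text
        yield delta

fake_outputs = ["Hel", "Hello", "Hello, wor", "Hello, world!"]  # hypothetical
print(list(iter_deltas(fake_outputs)))
# ['Hel', 'lo', ', wor', 'ld!']
```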
dan-r/HomeAssistant-Ohme
custom_components/ohme/api_client.py
[ { "identifier": "DOMAIN", "path": "custom_components/ohme/const.py", "snippet": "DOMAIN = \"ohme\"" }, { "identifier": "USER_AGENT", "path": "custom_components/ohme/const.py", "snippet": "USER_AGENT = \"dan-r-homeassistant-ohme\"" }, { "identifier": "INTEGRATION_VERSION", "path": "custom_components/ohme/const.py", "snippet": "INTEGRATION_VERSION = \"0.3.1\"" }, { "identifier": "time_next_occurs", "path": "custom_components/ohme/utils.py", "snippet": "def time_next_occurs(hour, minute):\n \"\"\"Find when this time next occurs.\"\"\"\n current = datetime.now()\n target = current.replace(hour=hour, minute=minute, second=0, microsecond=0)\n while target <= current:\n target = target + timedelta(days=1)\n\n return target" } ]
import aiohttp import logging import json from time import time from datetime import datetime, timedelta from homeassistant.helpers.entity import DeviceInfo from .const import DOMAIN, USER_AGENT, INTEGRATION_VERSION from .utils import time_next_occurs
2428
def is_capable(self, capability): """Return whether or not this model has a given capability.""" return bool(self._capabilities[capability]) def get_device_info(self): return self._device_info def get_unique_id(self, name): return f"ohme_{self._serial}_{name}" # Push methods async def async_pause_charge(self): """Pause an ongoing charge""" result = await self._post_request(f"/v1/chargeSessions/{self._serial}/stop", skip_json=True) return bool(result) async def async_resume_charge(self): """Resume a paused charge""" result = await self._post_request(f"/v1/chargeSessions/{self._serial}/resume", skip_json=True) return bool(result) async def async_approve_charge(self): """Approve a charge""" result = await self._put_request(f"/v1/chargeSessions/{self._serial}/approve?approve=true") return bool(result) async def async_max_charge(self): """Enable max charge""" result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?maxCharge=true") return bool(result) async def async_apply_session_rule(self, max_price=None, target_time=None, target_percent=None, pre_condition=None, pre_condition_length=None): """Apply rule to ongoing charge/stop max charge.""" # Check every property. If we've provided it, use that. If not, use the existing. if max_price is None: max_price = self._last_rule['settings'][0]['enabled'] if 'settings' in self._last_rule and len( self._last_rule['settings']) > 1 else False if target_percent is None: target_percent = self._last_rule['targetPercent'] if 'targetPercent' in self._last_rule else 80 if pre_condition is None: pre_condition = self._last_rule['preconditioningEnabled'] if 'preconditioningEnabled' in self._last_rule else False if pre_condition_length is None: pre_condition_length = self._last_rule[ 'preconditionLengthMins'] if 'preconditionLengthMins' in self._last_rule else 30 if target_time is None: # Default to 9am target_time = self._last_rule['targetTime'] if 'targetTime' in self._last_rule else 32400 target_time = (target_time // 3600, (target_time % 3600) // 60) target_ts = int(time_next_occurs( target_time[0], target_time[1]).timestamp() * 1000) # Convert these to string form max_price = 'true' if max_price else 'false' pre_condition = 'true' if pre_condition else 'false' result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?enableMaxPrice={max_price}&targetTs={target_ts}&enablePreconditioning={pre_condition}&toPercent={target_percent}&preconditionLengthMins={pre_condition_length}") return bool(result) async def async_get_schedule(self): """Get the first schedule.""" schedules = await self._get_request("/v1/chargeRules") return schedules[0] if len(schedules) > 0 else None async def async_update_schedule(self, target_percent=None, target_time=None): """Update the first listed schedule.""" rule = await self.async_get_schedule() # Account for user having no rules if not rule: return None # Update percent and time if provided if target_percent is not None: rule['targetPercent'] = target_percent if target_time is not None: rule['targetTime'] = (target_time[0] * 3600) + (target_time[1] * 60) await self._put_request(f"/v1/chargeRules/{rule['id']}", data=rule) return True async def async_set_configuration_value(self, values): """Set a configuration value or values.""" result = await self._put_request(f"/v1/chargeDevices/{self._serial}/appSettings", data=values) return bool(result) # Pull methods async def async_get_charge_sessions(self, is_retry=False): """Try to fetch charge sessions endpoint. 
If we get a non 200 response, refresh auth token and try again""" resp = await self._get_request('/v1/chargeSessions') resp = resp[0] # Cache the current rule if we are given it if resp["mode"] == "SMART_CHARGE" and 'appliedRule' in resp: self._last_rule = resp["appliedRule"] return resp async def async_get_account_info(self): resp = await self._get_request('/v1/users/me/account') return resp async def async_update_device_info(self, is_retry=False): """Update _device_info with our charger model.""" resp = await self.async_get_account_info() device = resp['chargeDevices'][0] info = DeviceInfo(
_LOGGER = logging.getLogger(__name__) GOOGLE_API_KEY = "AIzaSyC8ZeZngm33tpOXLpbXeKfwtyZ1WrkbdBY" class OhmeApiClient: """API client for Ohme EV chargers.""" def __init__(self, email, password): if email is None or password is None: raise Exception("Credentials not provided") # Credentials from configuration self._email = email self._password = password # Charger and its capabilities self._device_info = None self._capabilities = {} self._ct_connected = False # Authentication self._token_birth = 0 self._token = None self._refresh_token = None # User info self._user_id = "" self._serial = "" # Cache the last rule to use when we disable max charge or change schedule self._last_rule = {} # Sessions self._session = aiohttp.ClientSession( base_url="https://api.ohme.io") self._auth_session = aiohttp.ClientSession() # Auth methods async def async_create_session(self): """Refresh the user auth token from the stored credentials.""" async with self._auth_session.post( f"https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={GOOGLE_API_KEY}", data={"email": self._email, "password": self._password, "returnSecureToken": True} ) as resp: if resp.status != 200: return None resp_json = await resp.json() self._token_birth = time() self._token = resp_json['idToken'] self._refresh_token = resp_json['refreshToken'] return True async def async_refresh_session(self): """Refresh auth token if needed.""" if self._token is None: return await self.async_create_session() # Don't refresh token unless its over 45 mins old if time() - self._token_birth < 2700: return async with self._auth_session.post( f"https://securetoken.googleapis.com/v1/token?key={GOOGLE_API_KEY}", data={"grantType": "refresh_token", "refreshToken": self._refresh_token} ) as resp: if resp.status != 200: text = await resp.text() msg = f"Ohme auth refresh error: {text}" _LOGGER.error(msg) raise AuthException(msg) resp_json = await resp.json() self._token_birth = time() self._token = resp_json['id_token'] self._refresh_token = resp_json['refresh_token'] return True # Internal methods def _last_second_of_month_timestamp(self): """Get the last second of this month.""" dt = datetime.today() dt = dt.replace(day=1) + timedelta(days=32) dt = dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - timedelta(seconds=1) return int(dt.timestamp()*1e3) async def _handle_api_error(self, url, resp): """Raise an exception if API response failed.""" if resp.status != 200: text = await resp.text() msg = f"Ohme API response error: {url}, {resp.status}; {text}" _LOGGER.error(msg) raise ApiException(msg) def _get_headers(self): """Get auth and content-type headers""" return { "Authorization": "Firebase %s" % self._token, "Content-Type": "application/json", "User-Agent": f"{USER_AGENT}/{INTEGRATION_VERSION}" } async def _post_request(self, url, skip_json=False, data=None): """Make a POST request.""" await self.async_refresh_session() async with self._session.post( url, data=data, headers=self._get_headers() ) as resp: _LOGGER.debug(f"POST request to {url}, status code {resp.status}") await self._handle_api_error(url, resp) if skip_json: return await resp.text() return await resp.json() async def _put_request(self, url, data=None): """Make a PUT request.""" await self.async_refresh_session() async with self._session.put( url, data=json.dumps(data), headers=self._get_headers() ) as resp: _LOGGER.debug(f"PUT request to {url}, status code {resp.status}") await self._handle_api_error(url, resp) return True async def _get_request(self, url): 
"""Make a GET request.""" await self.async_refresh_session() async with self._session.get( url, headers=self._get_headers() ) as resp: _LOGGER.debug(f"GET request to {url}, status code {resp.status}") await self._handle_api_error(url, resp) return await resp.json() # Simple getters def ct_connected(self): """Is CT clamp connected.""" return self._ct_connected def is_capable(self, capability): """Return whether or not this model has a given capability.""" return bool(self._capabilities[capability]) def get_device_info(self): return self._device_info def get_unique_id(self, name): return f"ohme_{self._serial}_{name}" # Push methods async def async_pause_charge(self): """Pause an ongoing charge""" result = await self._post_request(f"/v1/chargeSessions/{self._serial}/stop", skip_json=True) return bool(result) async def async_resume_charge(self): """Resume a paused charge""" result = await self._post_request(f"/v1/chargeSessions/{self._serial}/resume", skip_json=True) return bool(result) async def async_approve_charge(self): """Approve a charge""" result = await self._put_request(f"/v1/chargeSessions/{self._serial}/approve?approve=true") return bool(result) async def async_max_charge(self): """Enable max charge""" result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?maxCharge=true") return bool(result) async def async_apply_session_rule(self, max_price=None, target_time=None, target_percent=None, pre_condition=None, pre_condition_length=None): """Apply rule to ongoing charge/stop max charge.""" # Check every property. If we've provided it, use that. If not, use the existing. if max_price is None: max_price = self._last_rule['settings'][0]['enabled'] if 'settings' in self._last_rule and len( self._last_rule['settings']) > 1 else False if target_percent is None: target_percent = self._last_rule['targetPercent'] if 'targetPercent' in self._last_rule else 80 if pre_condition is None: pre_condition = self._last_rule['preconditioningEnabled'] if 'preconditioningEnabled' in self._last_rule else False if pre_condition_length is None: pre_condition_length = self._last_rule[ 'preconditionLengthMins'] if 'preconditionLengthMins' in self._last_rule else 30 if target_time is None: # Default to 9am target_time = self._last_rule['targetTime'] if 'targetTime' in self._last_rule else 32400 target_time = (target_time // 3600, (target_time % 3600) // 60) target_ts = int(time_next_occurs( target_time[0], target_time[1]).timestamp() * 1000) # Convert these to string form max_price = 'true' if max_price else 'false' pre_condition = 'true' if pre_condition else 'false' result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?enableMaxPrice={max_price}&targetTs={target_ts}&enablePreconditioning={pre_condition}&toPercent={target_percent}&preconditionLengthMins={pre_condition_length}") return bool(result) async def async_get_schedule(self): """Get the first schedule.""" schedules = await self._get_request("/v1/chargeRules") return schedules[0] if len(schedules) > 0 else None async def async_update_schedule(self, target_percent=None, target_time=None): """Update the first listed schedule.""" rule = await self.async_get_schedule() # Account for user having no rules if not rule: return None # Update percent and time if provided if target_percent is not None: rule['targetPercent'] = target_percent if target_time is not None: rule['targetTime'] = (target_time[0] * 3600) + (target_time[1] * 60) await self._put_request(f"/v1/chargeRules/{rule['id']}", data=rule) return True async def 
async_set_configuration_value(self, values): """Set a configuration value or values.""" result = await self._put_request(f"/v1/chargeDevices/{self._serial}/appSettings", data=values) return bool(result) # Pull methods async def async_get_charge_sessions(self, is_retry=False): """Try to fetch charge sessions endpoint. If we get a non 200 response, refresh auth token and try again""" resp = await self._get_request('/v1/chargeSessions') resp = resp[0] # Cache the current rule if we are given it if resp["mode"] == "SMART_CHARGE" and 'appliedRule' in resp: self._last_rule = resp["appliedRule"] return resp async def async_get_account_info(self): resp = await self._get_request('/v1/users/me/account') return resp async def async_update_device_info(self, is_retry=False): """Update _device_info with our charger model.""" resp = await self.async_get_account_info() device = resp['chargeDevices'][0] info = DeviceInfo(
identifiers={(DOMAIN, "ohme_charger")},
0
2023-12-24 20:59:18+00:00
4k
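OhmeApiClient.async_refresh_session in the record above signs in on first use and otherwise refreshes the Firebase token only once it is over 45 minutes (2700 s) old. A minimal sketch of just that caching policy, with the HTTP calls omitted:

```python
# Minimal sketch of the token-refresh policy in async_refresh_session above:
# authenticate once, then only hit the refresh endpoint when the cached token
# is more than 45 minutes (2700 s) old.
import time

class TokenCache:
    def __init__(self, refresh_after: int = 2700):
        self._token = None
        self._token_birth = 0.0
        self._refresh_after = refresh_after

    def needs_refresh(self) -> bool:
        if self._token is None:
            return True  # no token yet -> full sign-in required
        return time.time() - self._token_birth >= self._refresh_after

    def store(self, token: str) -> None:
        self._token = token
        self._token_birth = time.time()

cache = TokenCache()
print(cache.needs_refresh())   # True: first call must authenticate
cache.store("example-id-token")  # hypothetical token value
print(cache.needs_refresh())   # False: token stays fresh for 45 minutes
```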
Almas-Ali/SpyIP
spyip/backend.py
[ { "identifier": "TooManyRequests", "path": "spyip/exceptions.py", "snippet": "class TooManyRequests(Exception):\n pass" }, { "identifier": "ConnectionTimeout", "path": "spyip/exceptions.py", "snippet": "class ConnectionTimeout(Exception):\n pass" }, { "identifier": "StatusError", "path": "spyip/exceptions.py", "snippet": "class StatusError(Exception):\n pass" }, { "identifier": "IPResponse", "path": "spyip/models.py", "snippet": "class IPResponse(BaseModel):\n \"\"\"\n Example response from API:\n\n {\n \"status\": \"success\",\n \"continent\": \"Asia\",\n \"continentCode\": \"AS\",\n \"country\": \"India\",\n \"countryCode\": \"IN\",\n \"region\": \"DL\",\n \"regionName\": \"National Capital Territory of Delhi\",\n \"city\": \"New Delhi\",\n \"district\": \"\",\n \"zip\": \"110001\",\n \"lat\": 28.6139,\n \"lon\": 77.209,\n \"timezone\": \"Asia/Kolkata\",\n \"offset\": 19800,\n \"currency\": \"INR\",\n \"isp\": \"Google LLC\",\n \"org\": \"Google LLC\",\n \"as\": \"AS15169 Google LLC\",\n \"asname\": \"GOOGLE\",\n \"mobile\": false,\n \"proxy\": false,\n \"hosting\": true,\n \"query\": \"142.250.193.206\",\n }\n \"\"\"\n\n status: str = Field(..., description='Status of the request.')\n continent: str = Field(..., description='Continent name.')\n continentCode: str = Field(..., description='Continent code.')\n country: str = Field(..., description='Country name.')\n countryCode: str = Field(..., description='Country code.')\n region: str = Field(..., description='Region code.')\n regionName: str = Field(..., description='Region name.')\n city: str = Field(..., description='City name.')\n district: str = Field(..., description='District name.')\n zip_: str = Field(..., description='Zip code.')\n lat: float = Field(..., description='Latitude.')\n lon: float = Field(..., description='Longitude.')\n timezone: str = Field(..., description='Timezone.')\n offset: int = Field(..., description='Offset.')\n currency: str = Field(..., description='Currency.')\n isp: str = Field(..., description='ISP name.')\n org: str = Field(..., description='Organization name.')\n as_: str = Field(..., description='AS number and name.')\n asname: str = Field(..., description='AS name.')\n mobile: bool = Field(..., description='Mobile status.')\n proxy: bool = Field(..., description='Proxy status.')\n hosting: bool = Field(..., description='Hosting status.')\n query: str = Field(..., description='IP address.')\n\n class Config:\n def alias_generator(x):\n return x.replace('_', '')\n\n populate_by_name = True\n # fields = { # Alias for reserved keywords\n # \"as_\": \"as\",\n # \"zip_\": \"zip\",\n # }\n\n @field_validator('status')\n def check_status(cls, v):\n if v != 'success':\n raise ValueError('Invalid IP address.')\n return v\n\n def json(self, **kwargs) -> str:\n return self.model_dump_json(**kwargs)" }, { "identifier": "DNSResponse", "path": "spyip/models.py", "snippet": "class DNSResponse(BaseModel):\n \"\"\"\n Example response from API:\n \"dns\": {\n \"ip\": \"74.125.73.83\",\n \"geo\": \"United States - Google\"\n }\n \"\"\"\n\n ip: str = Field(..., description='IP address.')\n geo: str = Field(..., description='Geo location.')\n\n def json(self, **kwargs) -> str:\n return self.model_dump_json(**kwargs)" } ]
from typing import List, Union from .exceptions import ( TooManyRequests, ConnectionTimeout, StatusError, ) from .models import ( IPResponse, DNSResponse, ) import asyncio import random import string import httpx
1750
def get_random_string(length: int = 32) -> str: """Generate a random string of fixed length.""" letters = string.ascii_lowercase + string.digits return ''.join(random.sample(letters, length)) # API endpoints for IP address lookup trace_me_url = 'http://ip-api.com/json/' trace_ip_url = 'http://ip-api.com/json/%(query)s' trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/' trace_ip_batch_url = 'http://ip-api.com/batch' headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.5', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0', } def trace_me( timeout: int = 5, lang: str = 'en', ) -> Union[IPResponse, None]: """Trace your own IP address.""" try: res = httpx.get( url=trace_me_url, params={'fields': 66842623, 'lang': lang}, headers=headers, timeout=timeout, ) if res.status_code == 200: return IPResponse(**res.json()) else: raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.') # 408 Request Timeout except httpx._exceptions.ConnectTimeout: raise ConnectionTimeout( 'Connection timeout. The server timed out waiting for the request. According to the HTTP specification, the client is allowed to repeat the request again after some time.' ) # 429 Too Many Requests except httpx._exceptions.TooManyRedirects: raise TooManyRequests( 'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.' ) def trace_ip( query: str, timeout: int = 5, lang: str = 'en', ) -> IPResponse: """Trace IP address""" try: res = httpx.get( url=trace_ip_url % {'query': query}, params={'fields': 66842623, 'lang': lang}, headers=headers, timeout=timeout, ) if res.status_code == 200: return IPResponse(**res.json()) else: raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.') # 408 Request Timeout except httpx._exceptions.ConnectTimeout: raise ConnectionTimeout('The server timed out waiting for the request.') # 429 Too Many Requests except httpx._exceptions.TooManyRedirects: raise TooManyRequests( 'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.' ) def trace_dns( timeout: int = 5, lang: str = 'en', ) -> IPResponse: """Trace your own DNS address.""" try: res = httpx.get( url=trace_dns_url, params={'fields': 66842623, 'lang': lang}, headers=headers, timeout=timeout, ) if res.status_code == 200:
def get_random_string(length: int = 32) -> str: """Generate a random string of fixed length.""" letters = string.ascii_lowercase + string.digits return ''.join(random.sample(letters, length)) # API endpoints for IP address lookup trace_me_url = 'http://ip-api.com/json/' trace_ip_url = 'http://ip-api.com/json/%(query)s' trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/' trace_ip_batch_url = 'http://ip-api.com/batch' headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.5', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0', } def trace_me( timeout: int = 5, lang: str = 'en', ) -> Union[IPResponse, None]: """Trace your own IP address.""" try: res = httpx.get( url=trace_me_url, params={'fields': 66842623, 'lang': lang}, headers=headers, timeout=timeout, ) if res.status_code == 200: return IPResponse(**res.json()) else: raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.') # 408 Request Timeout except httpx._exceptions.ConnectTimeout: raise ConnectionTimeout( 'Connection timeout. The server timed out waiting for the request. According to the HTTP specification, the client is allowed to repeat the request again after some time.' ) # 429 Too Many Requests except httpx._exceptions.TooManyRedirects: raise TooManyRequests( 'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.' ) def trace_ip( query: str, timeout: int = 5, lang: str = 'en', ) -> IPResponse: """Trace IP address""" try: res = httpx.get( url=trace_ip_url % {'query': query}, params={'fields': 66842623, 'lang': lang}, headers=headers, timeout=timeout, ) if res.status_code == 200: return IPResponse(**res.json()) else: raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.') # 408 Request Timeout except httpx._exceptions.ConnectTimeout: raise ConnectionTimeout('The server timed out waiting for the request.') # 429 Too Many Requests except httpx._exceptions.TooManyRedirects: raise TooManyRequests( 'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.' ) def trace_dns( timeout: int = 5, lang: str = 'en', ) -> IPResponse: """Trace your own DNS address.""" try: res = httpx.get( url=trace_dns_url, params={'fields': 66842623, 'lang': lang}, headers=headers, timeout=timeout, ) if res.status_code == 200:
return DNSResponse(**res.json()['dns'])
4
2023-12-31 19:43:38+00:00
4k
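The trace_dns endpoint in spyip/backend.py above is built from a random 32-character subdomain of edns.ip-api.com, which forces a fresh DNS resolution so the API can report which resolver actually handled the query. The URL construction in isolation:

```python
# Minimal sketch of how spyip/backend.py above builds its DNS-trace endpoint:
# a unique random subdomain defeats DNS caching for each lookup.
import random
import string

def get_random_string(length: int = 32) -> str:
    letters = string.ascii_lowercase + string.digits
    # sample() draws without replacement, matching the original helper
    return ''.join(random.sample(letters, length))

trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/'
print(trace_dns_url)  # e.g. http://k3f9...x.edns.ip-api.com/json/
```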
leopedroso45/Stable-Diffusion-ImageGen
sevsd/do_work.py
[ { "identifier": "setup_pipeline", "path": "sevsd/setup_pipeline.py", "snippet": "def setup_pipeline(pretrained_model_link_or_path, **kwargs):\n r\"\"\"\n Sets up and returns a Stable Diffusion pipeline for image generation.\n\n This function initializes the Stable Diffusion pipeline using either a pretrained model link or a local file path. It automatically determines the appropriate device (CPU or GPU) for running the model and applies necessary configuration parameters.\n\n Parameters:\n pretrained_model_link_or_path (str): A link to a pretrained model or a file path to a local model file.\n **kwargs: Additional keyword arguments for pipeline configuration.\n\n Returns:\n StableDiffusionPipeline: The initialized Stable Diffusion pipeline ready for image generation.\n\n Example:\n pipeline = setup_pipeline(\"CompVis/stable-diffusion-v1-4\")\n\n Note:\n - The function supports both remote model links and local `.safetensors` files.\n - It automatically disables the safety checker for faster inference unless specified otherwise in `**kwargs`.\n - The pipeline is configured to use the most efficient device available (CUDA, MPS, or CPU).\n \"\"\"\n\n device = setup_device()\n\n default_kwargs = {\n \"use_safetensors\": False,\n \"load_safety_checker\": False,\n \"requires_safety_checker\": False,\n }\n\n if pretrained_model_link_or_path.endswith(\".safetensors\"):\n default_kwargs[\"use_safetensors\"] = True\n default_kwargs.update(kwargs)\n\n pipeline = StableDiffusionPipeline.from_single_file(\n pretrained_model_link_or_path,\n **default_kwargs\n )\n else:\n default_kwargs.update(kwargs)\n pipeline = StableDiffusionPipeline.from_pretrained(\n pretrained_model_link_or_path,\n **default_kwargs\n )\n\n pipeline.to(device)\n pipeline.enable_attention_slicing()\n\n return pipeline" }, { "identifier": "process_task", "path": "sevsd/process_task.py", "snippet": "def process_task(job, pipeline, executor, path, parallel_exec=True):\n r\"\"\"\n Processes a single image generation job using the specified pipeline and execution parameters.\n\n This function handles the generation of one or more images based on a given job description. It supports both parallel and sequential execution modes. Generated images are saved to the specified path.\n\n Parameters:\n job (dict): A dictionary containing details for the image generation task. It includes 'prompt' and optionally 'negative_prompt'.\n pipeline (callable): The Stable Diffusion pipeline callable used for generating images.\n executor (dict): A dictionary containing execution parameters such as 'num_of_exec', 'cfg_scale', and 'inference_steps'.\n path (str): The directory path where generated images will be saved.\n parallel_exec (bool, optional): If True, generates all specified images in parallel. Defaults to True.\n\n The function saves each generated image with a unique timestamp in the specified path and prints the save location. 
In case of any exceptions, they are caught and printed.\n\n Example:\n job = {\n \"prompt\": \"A scenic landscape\",\n \"negative_prompt\": \"blurred image, black and white, watermarked image\"\n }\n executor = {\n \"num_of_exec\": 2,\n \"cfg_scale\": 7,\n \"inference_steps\": 50\n }\n pipeline = setup_pipeline(\"CompVis/stable-diffusion-v1-4\")\n process_task(job, pipeline, executor, \"./generated-images\", parallel_exec=False)\n\n Note:\n This function also handles CUDA cache clearing and garbage collection for memory management.\n \"\"\"\n \n def call_generate_image():\n images = generate_image(job, pipeline, executor, parallel_exec)\n if images is not None:\n for image in images:\n timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S%f\")\n image_path = f\"{path}/generated_image_{timestamp}.png\"\n image.save(image_path)\n print(f\"[sevsd] - image saved at {image_path}\")\n else:\n print(\"[sevsd] - image generation failed due to memory constraints.\")\n check_cuda_and_clear_cache()\n \n try:\n path = check_os_path(path)\n if job is not None:\n if parallel_exec is not True:\n num_images = executor.get(\"num_of_exec\", 1)\n for _ in range(num_images):\n call_generate_image()\n else:\n call_generate_image()\n except Exception as e:\n print(f\"[sevsd] - exception: {e}\")\n finally:\n check_cuda_and_clear_cache()" } ]
from sevsd.setup_pipeline import setup_pipeline from sevsd.process_task import process_task
1640
def do_work(models, jobs, image_path, parallel_exec=True, **kwargs): r""" Orchestrates the processing of image generation tasks based on given models and jobs. This function iterates over each model and the associated jobs, generating images as specified. It sets up the pipeline for each model and executes the image generation tasks, saving the results to the specified path. Parameters: models (list of dicts): List of model configurations. Each configuration includes: - 'name' (str): The model name or path. - 'executor' (dict): Parameters like 'labels', 'num_of_exec', 'cfg_scale', and 'inference_steps'. jobs (list of dicts): List of job configurations. Each job includes: - 'label' (int): Corresponding model label. - 'prompt' (str): Text prompt for image generation. - 'negative_prompt' (str, optional): Text prompt for undesired image features. image_path (str): Directory path to save the generated images. parallel_exec (bool, optional): Flag to enable parallel execution. Defaults to True. **kwargs: Additional keyword arguments for pipeline setup. Example: models = [ { "name": "CompVis/stable-diffusion-v1-4", "executor": { "labels": [1], "num_of_exec": 1, "cfg_scale": 7, "inference_steps": 100, } }, { "name": "./model_cache/model2.safetensors", "executor": { "labels": [2], "num_of_exec": 2, "cfg_scale": 6, "inference_steps": 50, } }, ] jobs = [ { "label": 1, "prompt": "A scenic landscape", "negative_prompt": "blurred image, black and white, watermarked image", }, { "label": 2, "prompt": "A person wearing a mask", "negative_prompt": "deformed anatomy, hand-drawn image, blurred image", }, ] do_work(models, jobs, "./generated-images") """ job_dict = {job['label']: [] for job in jobs} for job in jobs: job_dict[job['label']].append(job) for model in models: pipeline = setup_pipeline(model["name"], **kwargs) labels = model.get("executor", {}).get("labels", []) for label in labels: if label in job_dict: for job in job_dict[label]: executor = model.get("executor", {})
def do_work(models, jobs, image_path, parallel_exec=True, **kwargs): r""" Orchestrates the processing of image generation tasks based on given models and jobs. This function iterates over each model and the associated jobs, generating images as specified. It sets up the pipeline for each model and executes the image generation tasks, saving the results to the specified path. Parameters: models (list of dicts): List of model configurations. Each configuration includes: - 'name' (str): The model name or path. - 'executor' (dict): Parameters like 'labels', 'num_of_exec', 'cfg_scale', and 'inference_steps'. jobs (list of dicts): List of job configurations. Each job includes: - 'label' (int): Corresponding model label. - 'prompt' (str): Text prompt for image generation. - 'negative_prompt' (str, optional): Text prompt for undesired image features. image_path (str): Directory path to save the generated images. parallel_exec (bool, optional): Flag to enable parallel execution. Defaults to True. **kwargs: Additional keyword arguments for pipeline setup. Example: models = [ { "name": "CompVis/stable-diffusion-v1-4", "executor": { "labels": [1], "num_of_exec": 1, "cfg_scale": 7, "inference_steps": 100, } }, { "name": "./model_cache/model2.safetensors", "executor": { "labels": [2], "num_of_exec": 2, "cfg_scale": 6, "inference_steps": 50, } }, ] jobs = [ { "label": 1, "prompt": "A scenic landscape", "negative_prompt": "blurred image, black and white, watermarked image", }, { "label": 2, "prompt": "A person wearing a mask", "negative_prompt": "deformed anatomy, hand-drawn image, blurred image", }, ] do_work(models, jobs, "./generated-images") """ job_dict = {job['label']: [] for job in jobs} for job in jobs: job_dict[job['label']].append(job) for model in models: pipeline = setup_pipeline(model["name"], **kwargs) labels = model.get("executor", {}).get("labels", []) for label in labels: if label in job_dict: for job in job_dict[label]: executor = model.get("executor", {})
process_task(job, pipeline, executor, image_path, parallel_exec)
1
2023-12-28 16:19:12+00:00
4k
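do_work in the record above buckets jobs by their 'label' and lets each model process only the labels its executor declares. A minimal sketch of that dispatch; the third job is a hypothetical addition for illustration:

```python
# Minimal sketch of the label-based dispatch in do_work above: jobs are
# grouped by 'label', and each model only consumes the buckets listed in
# its executor's "labels".
jobs = [
    {"label": 1, "prompt": "A scenic landscape"},
    {"label": 2, "prompt": "A person wearing a mask"},
    {"label": 1, "prompt": "A city at night"},  # hypothetical extra job
]

job_dict = {job["label"]: [] for job in jobs}
for job in jobs:
    job_dict[job["label"]].append(job)

model_labels = [1]  # would come from model["executor"]["labels"]
for label in model_labels:
    for job in job_dict.get(label, []):
        print(f"model handles: {job['prompt']}")
```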
Emperor-WS/PyEmber
ember/autograd/function.py
[ { "identifier": "numpy_or_cupy", "path": "ember/cuda.py", "snippet": "def numpy_or_cupy(*tensors):\n \"\"\"\n Choose between NumPy and CuPy based on the device of input tensors.\n\n Args:\n *tensors: Variable number of tensors.\n\n Returns:\n module: NumPy or CuPy module based on the device of input tensors.\n\n Raises:\n RuntimeError: If tensors are on different devices.\n \"\"\"\n device = numpy.mean([t.device == 'cuda' for t in tensors])\n if device == 1:\n return cupy\n elif device == 0:\n return numpy\n else:\n logging.error(f\"Cannot compute from tensors on different devices. \"\n f\"Got {', '.join([t.device for t in tensors])}.\")" }, { "identifier": "scalars_to_device", "path": "ember/cuda.py", "snippet": "def scalars_to_device(*tensors):\n \"\"\"\n Move scalar tensors to the CUDA device if available.\n\n Args:\n *tensors: Variable number of tensors.\n\n Returns:\n None\n \"\"\"\n device = numpy.mean([t.device == 'cuda' for t in tensors])\n\n if device > 0:\n for tensor in tensors:\n if tensor.shape == ():\n tensor.cuda()" }, { "identifier": "inv_permutation", "path": "ember/autograd/utils.py", "snippet": "def inv_permutation(permutation):\n \"\"\"\n Compute the inverse of a permutation.\n\n Args:\n - permutation (list): List representing a permutation.\n\n Returns:\n - list: Inverse permutation.\n\n \"\"\"\n inverse = [0] * len(permutation)\n for original_idx, permuted_idx in enumerate(permutation):\n inverse[permuted_idx] = original_idx\n return inverse" }, { "identifier": "Hook", "path": "ember/autograd/hook.py", "snippet": "class Hook:\n \"\"\"\n Hook class for attaching gradient functions to tensors.\n\n Hooks allow users to attach custom gradient functions to tensors for\n monitoring or modifying gradients during backpropagation.\n\n Attributes:\n - tensor (Tensor): The target tensor.\n - grad_fn (callable): The gradient function to be applied to the tensor.\n\n Methods:\n - __init__(self, tensor, grad_fn): Constructor for Hook class.\n - __repr__(self): String representation of the Hook instance.\n\n \"\"\"\n\n __slots__ = 'tensor', 'grad_fn'\n\n def __init__(self, tensor, grad_fn):\n \"\"\"\n Constructor for the Hook class.\n\n Args:\n - tensor (Tensor): The target tensor.\n - grad_fn (callable): The gradient function to be applied to the tensor.\n\n \"\"\"\n self.tensor = tensor\n self.grad_fn = grad_fn\n\n def __repr__(self):\n \"\"\"\n String representation of the Hook instance.\n\n Returns:\n - str: A string containing information about the tensor and its associated gradient function.\n\n \"\"\"\n # Extract the class name from the qualified name of the gradient function\n grad_name = self.grad_fn.__qualname__.split('.')[0]\n return f\"Hook(tensor_id={self.tensor.id}, grad_fn={grad_name.upper()})\"" } ]
from abc import ABC, abstractmethod from ember.cuda import numpy_or_cupy, scalars_to_device from .utils import inv_permutation from .hook import Hook import numpy as np import copy import ember
1626
def backward(self, grad): """Abstract method for the backward pass.""" raise NotImplementedError def __call__(self, *tensors): """ Invokes the function, registering hooks for gradients. Args: - *tensors: Variable number of input tensors. Returns: - Tensor: Output tensor from the forward pass. """ self.tensors = (*tensors,) scalars_to_device(*self.tensors) # Perform the forward pass out = self.forward(*tensors) # Register hooks for gradients for tensor in self.tensors: if tensor.requires_grad: out.register_hook(Hook(tensor, self.backward)) return out def __repr__(self): """ Returns a string representation of the function. Returns: - str: String representation of the function. """ return f'<Function: {self.__class__.__name__}>' class Add(Function): """ Addition operation. Methods: - forward(tensor1, tensor2): Performs addition. - single_backward(grad, tensor): Computes gradient for a single tensor. - backward(grad): Computes gradients for tensors involved in the backward pass. """ def forward(self, tensor1, tensor2): """ Performs addition. Args: - tensor1: First input tensor. - tensor2: Second input tensor. Returns: - Tensor: Resultant tensor after addition. """ data = tensor1.data + tensor2.data requires_grad = tensor1.requires_grad or tensor2.requires_grad device = tensor1.device return ember.Tensor(data, requires_grad=requires_grad, device=device) @staticmethod def single_backward(grad, tensor): """ Computes gradient for a single tensor. Args: - grad: Gradient. - tensor: Input tensor. Returns: - Tensor: Gradient for the input tensor. """ num_dims_added = grad.ndim - tensor.ndim for _ in range(num_dims_added): grad = grad.sum(axis=0) for i, dim in enumerate(tensor.shape): if dim == 1: grad = grad.sum(axis=i, keepdims=True) return grad def backward(self, grad): """ Computes gradients for tensors involved in the backward pass. Args: - grad: Gradient. Returns: - Tuple: Gradients for each input tensor. """ tensor1, tensor2 = self.tensors return (self.single_backward(grad, tensor1), self.single_backward(grad, tensor2)) class Multiply(Function): """ Multiplication operation. Methods: - forward(tensor1, tensor2): Performs multiplication. - single_backward(grad, t1, t2): Computes gradient for a single tensor. - backward(grad): Computes gradients for tensors involved in the backward pass. """ def forward(self, tensor1, tensor2): """ Performs multiplication. Args: - tensor1: First input tensor. - tensor2: Second input tensor. Returns: - Tensor: Resultant tensor after multiplication. """ # Determine whether to use NumPy or CuPy for element-wise multiplication
class Function(ABC): """ Abstract base class for defining mathematical operations as functions. Attributes: - tensors: Tensors involved in the operation. Methods: - forward(*tensors): Abstract method for the forward pass. - backward(grad): Abstract method for the backward pass. - __call__(*tensors): Invokes the function, registering hooks for gradients. - __repr__(): Returns a string representation of the function. """ __slots__ = 'tensors' def __init__(self): super(Function, self).__init__() self.tensors = None @abstractmethod def forward(self, *tensors): """Abstract method for the forward pass.""" raise NotImplementedError @abstractmethod def backward(self, grad): """Abstract method for the backward pass.""" raise NotImplementedError def __call__(self, *tensors): """ Invokes the function, registering hooks for gradients. Args: - *tensors: Variable number of input tensors. Returns: - Tensor: Output tensor from the forward pass. """ self.tensors = (*tensors,) scalars_to_device(*self.tensors) # Perform the forward pass out = self.forward(*tensors) # Register hooks for gradients for tensor in self.tensors: if tensor.requires_grad: out.register_hook(Hook(tensor, self.backward)) return out def __repr__(self): """ Returns a string representation of the function. Returns: - str: String representation of the function. """ return f'<Function: {self.__class__.__name__}>' class Add(Function): """ Addition operation. Methods: - forward(tensor1, tensor2): Performs addition. - single_backward(grad, tensor): Computes gradient for a single tensor. - backward(grad): Computes gradients for tensors involved in the backward pass. """ def forward(self, tensor1, tensor2): """ Performs addition. Args: - tensor1: First input tensor. - tensor2: Second input tensor. Returns: - Tensor: Resultant tensor after addition. """ data = tensor1.data + tensor2.data requires_grad = tensor1.requires_grad or tensor2.requires_grad device = tensor1.device return ember.Tensor(data, requires_grad=requires_grad, device=device) @staticmethod def single_backward(grad, tensor): """ Computes gradient for a single tensor. Args: - grad: Gradient. - tensor: Input tensor. Returns: - Tensor: Gradient for the input tensor. """ num_dims_added = grad.ndim - tensor.ndim for _ in range(num_dims_added): grad = grad.sum(axis=0) for i, dim in enumerate(tensor.shape): if dim == 1: grad = grad.sum(axis=i, keepdims=True) return grad def backward(self, grad): """ Computes gradients for tensors involved in the backward pass. Args: - grad: Gradient. Returns: - Tuple: Gradients for each input tensor. """ tensor1, tensor2 = self.tensors return (self.single_backward(grad, tensor1), self.single_backward(grad, tensor2)) class Multiply(Function): """ Multiplication operation. Methods: - forward(tensor1, tensor2): Performs multiplication. - single_backward(grad, t1, t2): Computes gradient for a single tensor. - backward(grad): Computes gradients for tensors involved in the backward pass. """ def forward(self, tensor1, tensor2): """ Performs multiplication. Args: - tensor1: First input tensor. - tensor2: Second input tensor. Returns: - Tensor: Resultant tensor after multiplication. """ # Determine whether to use NumPy or CuPy for element-wise multiplication
nc = numpy_or_cupy(tensor1, tensor2)
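The `single_backward` reduction in `Add` above undoes NumPy-style broadcasting when routing gradients back to an operand: axes that broadcasting prepended are summed away, and axes the operand originally had as size 1 are summed with `keepdims=True`. The same rule as a NumPy-only sketch, decoupled from `ember.Tensor`:

```python
import numpy as np

def reduce_grad_to_shape(grad, shape):
    # Sum over the leading axes that broadcasting prepended...
    for _ in range(grad.ndim - len(shape)):
        grad = grad.sum(axis=0)
    # ...then over axes the operand originally had as size 1.
    for i, dim in enumerate(shape):
        if dim == 1:
            grad = grad.sum(axis=i, keepdims=True)
    return grad

upstream = np.ones((4, 3))   # gradient flowing out of a broadcast add
b_shape = (1, 3)             # original shape of the smaller operand
gb = reduce_grad_to_shape(upstream, b_shape)
assert gb.shape == b_shape and (gb == 4).all()
```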
0
2023-12-23 23:11:58+00:00
4k
Hassi34/iot-device-identification
src/stage_01_ingest_data.py
[ { "identifier": "read_yaml", "path": "src/utils/common.py", "snippet": "def read_yaml(path_to_yaml: str) -> dict:\n with open(path_to_yaml) as yaml_file:\n content = yaml.safe_load(yaml_file)\n return content" }, { "identifier": "MongoDBOps", "path": "src/utils/mongo_ops.py", "snippet": "class MongoDBOps:\n # client = None\n\n def __init__(self, database_name=MONGO_DATABSE_NAME) -> None:\n try:\n try:\n self.client = MongoDBOps.client\n self.database_name = self.client[database_name]\n except AttributeError:\n mongo_db_uri = MONGO_DB_URI\n self.client = MongoClient(mongo_db_uri, server_api=ServerApi(\"1\"))\n self.database = self.client[database_name]\n self.database_name = database_name\n except Exception as e:\n raise e\n\n def export_collection_as_dataframe(\n self,\n collection_name: str,\n rows_to_load: int,\n database_name: Optional[str] = None,\n ) -> pd.DataFrame:\n \"\"\"\n export entire collectin as dataframe:\n return pd.DataFrame of collection\n \"\"\"\n if database_name is None:\n collection = self.client[self.database_name][collection_name]\n\n else:\n collection = self.client[database_name][collection_name]\n\n df = pd.DataFrame(list(collection.find().limit(rows_to_load)))\n\n if \"_id\" in df.columns.to_list():\n df = df.drop(columns=[\"_id\"], axis=1)\n\n df.replace({\"na\": np.nan}, inplace=True)\n\n return df\n\n def insert_many(self, collection_name, records: list):\n self.client[self.database_name][collection_name].insert_many(records)\n\n def insert(self, collection_name, record):\n self.client[self.database_name][collection_name].insert_one(record)" }, { "identifier": "get_logger", "path": "src/utils/sys_logging.py", "snippet": "def get_logger(logs_filepath: str):\n logger.add(\n logs_filepath,\n format=\"{time} | {level} | {name}.{module}:{line} | {message}\",\n level=\"DEBUG\",\n rotation=\"10 KB\",\n retention=\"10 days\",\n compression=\"zip\",\n colorize=True,\n enqueue=True,\n catch=True,\n encoding=\"utf-8\",\n )\n return logger" }, { "identifier": "MLFlowManager", "path": "src/utils/mlflow_ops.py", "snippet": "class MLFlowManager:\n def __init__(self):\n if mlflow.tracking.is_tracking_uri_set():\n self.client = MlflowClient()\n else:\n raise Exception(\"Tracking URI not set\")\n\n def get_or_create_an_experiment(self, experiment_name):\n exp = mlflow.get_experiment_by_name(experiment_name)\n if exp is None:\n exp_id = mlflow.create_experiment(experiment_name)\n return exp_id\n return exp.experiment_id\n\n def latest_model_version(self, model_name) -> int:\n return self.client.get_latest_versions(model_name)[0].version\n\n @property\n def get_latest_version_model_uri(self, model_name) -> str:\n model_uri = f\"models:/{model_name}/{self.latest_model_version(model_name)}\"\n return model_uri\n\n def load_latest_model_version(self, model_name):\n return load_model(self.get_latest_version_model_uri(model_name))\n\n def get_best_run_id_and_model_uri(\n self, experiment_id: str, metric_name: str = \"metrics.mae\", ascending=True\n ):\n runs = mlflow.search_runs(f\"{experiment_id}\")\n runs = runs.dropna(subset=[\"tags.mlflow.log-model.history\"])\n runs.sort_values(by=[metric_name], ascending=ascending, inplace=True)\n runs.to_csv(\"mlflow.csv\", index=False)\n runs.reset_index(inplace=True, drop=True)\n best_run_id = runs[\"run_id\"][0]\n\n best_run = runs[runs[\"run_id\"] == best_run_id]\n artifact_uri = best_run[\"artifact_uri\"][0]\n\n logged_model_dir = best_run[\"tags.mlflow.log-model.history\"][0].split(\",\")[1:2]\n logged_model_dir = (\n 
logged_model_dir[0].strip().split(\":\")[1].replace('\"', \"\").strip()\n )\n\n model_uri = str(artifact_uri) + \"/\" + str(logged_model_dir)\n\n return best_run_id, model_uri\n\n def print_registered_model(self, model_name):\n for model in self.client.search_registered_models(\n filter_string=f\"name LIKE {model_name}\"\n ):\n for model_version in model.latest_versions:\n print(\n f\"name : {model_version.name} run_id : {model_version.run_id} version : {model_version.version} stage : {model_version.current_stage}\"\n )\n\n def rename_a_registered_model(self, current_name, new_name):\n self.client.rename_registered_model(\n name=current_name,\n new_name=new_name,\n )\n\n def transition_model_version_stage(self, model_name, model_version, stage):\n self.client.transition_model_version_stage(\n name=model_name, version=model_version, stage=stage\n )\n\n def log_artifact(self, artifact_path=str):\n mlflow.log_artifact(artifact_path)" } ]
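`get_best_run_id_and_model_uri` above recovers the logged model's artifact sub-path by plain string-splitting of the `tags.mlflow.log-model.history` column, even though that tag is really a JSON list. A self-contained sketch of the extraction; the history string and artifact URI below are made-up placeholders:

```python
# Hypothetical value of the tags.mlflow.log-model.history column.
history = '[{"run_id": "abc123", "artifact_path": "model", "flavors": {}}]'

logged_model_dir = history.split(",")[1:2]
logged_model_dir = (
    logged_model_dir[0].strip().split(":")[1].replace('"', "").strip()
)
artifact_uri = "s3://bucket/exp/abc123/artifacts"  # placeholder URI

model_uri = str(artifact_uri) + "/" + str(logged_model_dir)
assert logged_model_dir == "model"
print(model_uri)  # s3://bucket/exp/abc123/artifacts/model
```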
import argparse import mlflow import os from src.utils.common import read_yaml from src.utils import MongoDBOps from src.utils.sys_logging import get_logger from src.utils import MLFlowManager from pathlib import Path
1,846
STAGE = "Ingest Data" def ingest_data(): logger.info("Pulling data from the source...") mongo_db = MongoDBOps(database_name=MONGO_DATABSE_NAME) complete_df = mongo_db.export_collection_as_dataframe( collection_name=MONGO_COLLECTION_NAME, rows_to_load=MONGO_NUMBER_OF_ROWS_TO_INGEST, ) logger.info( f'The collection has been exported as a pandas dataframe with the shape "{complete_df.shape}"' ) Path(RAW_DATA_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True) complete_df.to_parquet(RAW_DATA_FILE_PATH, compression="gzip") logger.info(f'Data has been saved locally at "{RAW_DATA_FILE_PATH}"') mlflow_service = MLFlowManager() mlflow.set_experiment(EXPERIMENT_NAME) runs = mlflow.search_runs(order_by=["attribute.start_time DESC"]) if runs.empty: logger.warning("This is a new experiment, skipping the data drift check...") recent_run = runs[0:1] recent_run_id = recent_run.run_id[0] Path(LAST_EXP_DATA_DIR).absolute().mkdir(parents=True, exist_ok=True) file_name = Path(RAW_DATA_FILE_PATH).resolve().name mlflow_artifact_path = MLFLOW_ARTIFACT_DIR + "/" + file_name last_experiment_data_file_path = os.path.join( LAST_EXP_DATA_DIR, MLFLOW_ARTIFACT_DIR, file_name ) try: mlflow_service.client.download_artifacts( recent_run_id, mlflow_artifact_path, LAST_EXP_DATA_DIR ) logger.info( f"The last data version has been downloaded and saved to {last_experiment_data_file_path}" ) except Exception as e: logger.error("Could not download the last data version") raise e if __name__ == "__main__": args = argparse.ArgumentParser() args.add_argument("--config", "-c", default="configs/system.yaml") parsed_args = args.parse_args() config = read_yaml(parsed_args.config) LOGS_FILE_PATH = config["logs"]["RUNNING_LOGS_FILE_PATH"] RAW_DATA_FILE_PATH = config["data"]["RAW_DATA_FILE_PATH"][0] LAST_EXP_DATA_DIR = config["data"]["LAST_EXP_DATA_DIR"] MONGO_DATABSE_NAME = config["data"]["MONGO_DATABSE_NAME"] MONGO_COLLECTION_NAME = config["data"]["MONGO_COLLECTION_NAME"] MONGO_NUMBER_OF_ROWS_TO_INGEST = config["data"]["MONGO_NUMBER_OF_ROWS_TO_INGEST"] MLFLOW_ARTIFACT_DIR = config["mlflow"]["ARTIFACT_DIR"] EXPERIMENT_NAME = config["mlflow"]["EXPERIMENT_NAME"]
STAGE = "Ingest Data" def ingest_data(): logger.info("Pulling data from the source...") mongo_db = MongoDBOps(database_name=MONGO_DATABSE_NAME) complete_df = mongo_db.export_collection_as_dataframe( collection_name=MONGO_COLLECTION_NAME, rows_to_load=MONGO_NUMBER_OF_ROWS_TO_INGEST, ) logger.info( f'The collection has been exported as a pandas dataframe with the shape "{complete_df.shape}"' ) Path(RAW_DATA_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True) complete_df.to_parquet(RAW_DATA_FILE_PATH, compression="gzip") logger.info(f'Data has been saved locally at "{RAW_DATA_FILE_PATH}"') mlflow_service = MLFlowManager() mlflow.set_experiment(EXPERIMENT_NAME) runs = mlflow.search_runs(order_by=["attribute.start_time DESC"]) if runs.empty: logger.warning("This is a new experiment, skipping the data drift check...") recent_run = runs[0:1] recent_run_id = recent_run.run_id[0] Path(LAST_EXP_DATA_DIR).absolute().mkdir(parents=True, exist_ok=True) file_name = Path(RAW_DATA_FILE_PATH).resolve().name mlflow_artifact_path = MLFLOW_ARTIFACT_DIR + "/" + file_name last_experiment_data_file_path = os.path.join( LAST_EXP_DATA_DIR, MLFLOW_ARTIFACT_DIR, file_name ) try: mlflow_service.client.download_artifacts( recent_run_id, mlflow_artifact_path, LAST_EXP_DATA_DIR ) logger.info( f"The last data version has been downloaded and saved to {last_experiment_data_file_path}" ) except Exception as e: logger.error("Could not download the last data version") raise e if __name__ == "__main__": args = argparse.ArgumentParser() args.add_argument("--config", "-c", default="configs/system.yaml") parsed_args = args.parse_args() config = read_yaml(parsed_args.config) LOGS_FILE_PATH = config["logs"]["RUNNING_LOGS_FILE_PATH"] RAW_DATA_FILE_PATH = config["data"]["RAW_DATA_FILE_PATH"][0] LAST_EXP_DATA_DIR = config["data"]["LAST_EXP_DATA_DIR"] MONGO_DATABSE_NAME = config["data"]["MONGO_DATABSE_NAME"] MONGO_COLLECTION_NAME = config["data"]["MONGO_COLLECTION_NAME"] MONGO_NUMBER_OF_ROWS_TO_INGEST = config["data"]["MONGO_NUMBER_OF_ROWS_TO_INGEST"] MLFLOW_ARTIFACT_DIR = config["mlflow"]["ARTIFACT_DIR"] EXPERIMENT_NAME = config["mlflow"]["EXPERIMENT_NAME"]
logger = get_logger(LOGS_FILE_PATH)
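For reference, `get_logger` boils down to a single loguru sink registration where rotation, retention, and compression are all properties of the sink. A trimmed sketch of the same setup, assuming the `loguru` package is installed and using an illustrative log path:

```python
from loguru import logger

def get_logger(logs_filepath: str):
    # One file sink: rotate at 10 KB, keep 10 days, zip rotated files.
    logger.add(
        logs_filepath,
        format="{time} | {level} | {name}.{module}:{line} | {message}",
        level="DEBUG",
        rotation="10 KB",
        retention="10 days",
        compression="zip",
        enqueue=True,   # queue writes so multiprocess logging stays safe
        encoding="utf-8",
    )
    return logger

log = get_logger("logs/running.log")  # illustrative path
log.info("pipeline stage started")
```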
2
2023-12-25 10:40:19+00:00
4k
see2023/Bert-VITS2-ext
for_deploy/webui.py
[ { "identifier": "split_by_language", "path": "tools/sentence.py", "snippet": "def split_by_language(text: str, target_languages: list = None) -> list:\n pattern = (\n r\"[\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\>\\=\\?\\@\\[\\]\\{\\}\\\\\\\\\\^\\_\\`\"\n r\"\\!?\\。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」\"\n r\"『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘\\'\\‛\\“\\”\\„\\‟…‧﹏.]+\"\n )\n sentences = re.split(pattern, text)\n\n pre_lang = \"\"\n start = 0\n end = 0\n sentences_list = []\n\n if target_languages is not None:\n sorted_target_languages = sorted(target_languages)\n if sorted_target_languages in [[\"en\", \"zh\"], [\"en\", \"ja\"], [\"en\", \"ja\", \"zh\"]]:\n new_sentences = []\n for sentence in sentences:\n new_sentences.extend(split_alpha_nonalpha(sentence))\n sentences = new_sentences\n\n for sentence in sentences:\n if check_is_none(sentence):\n continue\n\n lang = classify_language(sentence, target_languages)\n\n end += text[end:].index(sentence)\n if pre_lang != \"\" and pre_lang != lang:\n sentences_list.append((text[start:end], pre_lang))\n start = end\n end += len(sentence)\n pre_lang = lang\n sentences_list.append((text[start:], pre_lang))\n\n return sentences_list" }, { "identifier": "infer", "path": "infer.py", "snippet": "def get_net_g(model_path: str, version: str, device: str, hps):\ndef get_text(text, language_str, hps, device, style_text=None, style_weight=0.7):\ndef infer(\n text,\n emotion,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n sid,\n language,\n hps,\n net_g,\n device,\n reference_audio=None,\n skip_start=False,\n skip_end=False,\n style_text=None,\n style_weight=0.7,\n):\ndef infer_multilang(\n text,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n sid,\n language,\n hps,\n net_g,\n device,\n reference_audio=None,\n emotion=None,\n skip_start=False,\n skip_end=False,\n):" }, { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def 
from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "translate", "path": "tools/translate.py", "snippet": "def translate(Sentence: str, to_Language: str = \"jp\", from_Language: str = \"\"):\n \"\"\"\n :param Sentence: the text to translate\n :param from_Language: language of the source text\n :param to_Language: target language\n :return: the translated text; returns None on error\n\n Common language codes: Chinese zh, English en, Japanese jp\n \"\"\"\n appid = config.translate_config.app_key\n key = config.translate_config.secret_key\n if appid == \"\" or key == \"\":\n return \"Please configure app_key and secret_key in config.yml\"\n url = \"https://fanyi-api.baidu.com/api/trans/vip/translate\"\n texts = Sentence.splitlines()\n outTexts = []\n for t in texts:\n if t != \"\":\n # Signature calculation; see the docs at https://api.fanyi.baidu.com/product/113\n salt = str(random.randint(1, 100000))\n signString = appid + t + salt + key\n hs = hashlib.md5()\n hs.update(signString.encode(\"utf-8\"))\n signString = hs.hexdigest()\n if from_Language == \"\":\n from_Language = \"auto\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n payload = {\n \"q\": t,\n \"from\": from_Language,\n \"to\": to_Language,\n \"appid\": appid,\n \"salt\": salt,\n \"sign\": signString,\n }\n # Send the request\n try:\n response = requests.post(\n url=url, data=payload, headers=headers, timeout=3\n )\n response = response.json()\n if \"trans_result\" in response.keys():\n result = response[\"trans_result\"][0]\n if \"dst\" in result.keys():\n dst = result[\"dst\"]\n outTexts.append(dst)\n except Exception:\n return Sentence\n else:\n outTexts.append(t)\n return \"\\n\".join(outTexts)" } ]
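The request signing in `translate` above is simply `MD5(appid + q + salt + key)`, sent alongside the plaintext query parameters. A network-free sketch of the signature computation; the `appid` and `key` values below are placeholders, not real credentials:

```python
import hashlib
import random

def baidu_sign(appid, query, key):
    # Per https://api.fanyi.baidu.com/product/113: sign = MD5(appid + q + salt + key)
    salt = str(random.randint(1, 100000))
    digest = hashlib.md5((appid + query + salt + key).encode("utf-8")).hexdigest()
    return salt, digest

salt, sign = baidu_sign("20230000000000000", "hello world", "fake_secret_key")
payload = {"q": "hello world", "from": "auto", "to": "jp",
           "appid": "20230000000000000", "salt": salt, "sign": sign}
print(payload["sign"])  # 32-char hex digest
```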
import os import logging import re_matching import torch import utils import gradio as gr import webbrowser import numpy as np import librosa from tools.sentence import split_by_language from infer import infer, latest_version, get_net_g, infer_multilang from config import config from tools.translate import translate from infer_utils import BertFeature, ClapFeature
1,989
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logging.getLogger("markdown_it").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("matplotlib").setLevel(logging.WARNING) logging.basicConfig( level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s" ) logger = logging.getLogger(__name__) net_g = None
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logging.getLogger("markdown_it").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("matplotlib").setLevel(logging.WARNING) logging.basicConfig( level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s" ) logger = logging.getLogger(__name__) net_g = None
device = config.webui_config.device
2
2023-12-27 03:09:11+00:00
4k
chinhsuanwu/ifusion-threestudio
threestudio/systems/zero123.py
[ { "identifier": "BaseLift3DSystem", "path": "threestudio/systems/base.py", "snippet": "class BaseLift3DSystem(BaseSystem):\n @dataclass\n class Config(BaseSystem.Config):\n geometry_type: str = \"\"\n geometry: dict = field(default_factory=dict)\n geometry_convert_from: Optional[str] = None\n geometry_convert_inherit_texture: bool = False\n # used to override configurations of the previous geometry being converted from,\n # for example isosurface_threshold\n geometry_convert_override: dict = field(default_factory=dict)\n\n material_type: str = \"\"\n material: dict = field(default_factory=dict)\n\n background_type: str = \"\"\n background: dict = field(default_factory=dict)\n\n renderer_type: str = \"\"\n renderer: dict = field(default_factory=dict)\n\n guidance_type: str = \"\"\n guidance: dict = field(default_factory=dict)\n\n prompt_processor_type: str = \"\"\n prompt_processor: dict = field(default_factory=dict)\n\n # geometry export configurations, no need to specify in training\n exporter_type: str = \"mesh-exporter\"\n exporter: dict = field(default_factory=dict)\n\n cfg: Config\n\n def configure(self) -> None:\n self.cfg.geometry_convert_from = find_last_path(self.cfg.geometry_convert_from)\n self.cfg.weights = find_last_path(self.cfg.weights)\n if (\n self.cfg.geometry_convert_from # from_coarse must be specified\n and not self.cfg.weights # not initialized from coarse when weights are specified\n and not self.resumed # not initialized from coarse when resumed from checkpoints\n ):\n threestudio.info(\"Initializing geometry from a given checkpoint ...\")\n from threestudio.utils.config import load_config, parse_structured\n\n prev_cfg = load_config(\n os.path.join(\n os.path.dirname(self.cfg.geometry_convert_from),\n \"../configs/parsed.yaml\",\n )\n ) # TODO: hard-coded relative path\n prev_system_cfg: BaseLift3DSystem.Config = parse_structured(\n self.Config, prev_cfg.system\n )\n prev_geometry_cfg = prev_system_cfg.geometry\n prev_geometry_cfg.update(self.cfg.geometry_convert_override)\n prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(\n prev_geometry_cfg\n )\n state_dict, epoch, global_step = load_module_weights(\n self.cfg.geometry_convert_from,\n module_name=\"geometry\",\n map_location=\"cpu\",\n )\n prev_geometry.load_state_dict(state_dict, strict=False)\n # restore step-dependent states\n prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)\n # convert from coarse stage geometry\n prev_geometry = prev_geometry.to(get_device())\n self.geometry = threestudio.find(self.cfg.geometry_type).create_from(\n prev_geometry,\n self.cfg.geometry,\n copy_net=self.cfg.geometry_convert_inherit_texture,\n )\n del prev_geometry\n cleanup()\n else:\n self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)\n\n self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)\n self.background = threestudio.find(self.cfg.background_type)(\n self.cfg.background\n )\n self.renderer = threestudio.find(self.cfg.renderer_type)(\n self.cfg.renderer,\n geometry=self.geometry,\n material=self.material,\n background=self.background,\n )\n\n def on_fit_start(self) -> None:\n if self._save_dir is not None:\n threestudio.info(f\"Validation results will be saved to {self._save_dir}\")\n else:\n threestudio.warn(\n f\"Saving directory not set for the system, visualization results will not be saved\"\n )\n\n def on_test_end(self) -> None:\n if self._save_dir is not None:\n threestudio.info(f\"Test results saved to {self._save_dir}\")\n\n def 
on_predict_start(self) -> None:\n self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)(\n self.cfg.exporter,\n geometry=self.geometry,\n material=self.material,\n background=self.background,\n )\n\n def predict_step(self, batch, batch_idx):\n if self.exporter.cfg.save_video:\n self.test_step(batch, batch_idx)\n\n def on_predict_epoch_end(self) -> None:\n if self.exporter.cfg.save_video:\n self.on_test_epoch_end()\n exporter_output: List[ExporterOutput] = self.exporter()\n for out in exporter_output:\n save_func_name = f\"save_{out.save_type}\"\n if not hasattr(self, save_func_name):\n raise ValueError(f\"{save_func_name} not supported by the SaverMixin\")\n save_func = getattr(self, save_func_name)\n save_func(f\"it{self.true_global_step}-export/{out.save_name}\", **out.params)\n\n def on_predict_end(self) -> None:\n if self._save_dir is not None:\n threestudio.info(f\"Export assets saved to {self._save_dir}\")\n\n def guidance_evaluation_save(self, comp_rgb, guidance_eval_out):\n B, size = comp_rgb.shape[:2]\n resize = lambda x: F.interpolate(\n x.permute(0, 3, 1, 2), (size, size), mode=\"bilinear\", align_corners=False\n ).permute(0, 2, 3, 1)\n filename = f\"it{self.true_global_step}-train.png\"\n\n def merge12(x):\n return x.reshape(-1, *x.shape[2:])\n\n self.save_image_grid(\n filename,\n [\n {\n \"type\": \"rgb\",\n \"img\": merge12(comp_rgb),\n \"kwargs\": {\"data_format\": \"HWC\"},\n },\n ]\n + (\n [\n {\n \"type\": \"rgb\",\n \"img\": merge12(resize(guidance_eval_out[\"imgs_noisy\"])),\n \"kwargs\": {\"data_format\": \"HWC\"},\n }\n ]\n )\n + (\n [\n {\n \"type\": \"rgb\",\n \"img\": merge12(resize(guidance_eval_out[\"imgs_1step\"])),\n \"kwargs\": {\"data_format\": \"HWC\"},\n }\n ]\n )\n + (\n [\n {\n \"type\": \"rgb\",\n \"img\": merge12(resize(guidance_eval_out[\"imgs_1orig\"])),\n \"kwargs\": {\"data_format\": \"HWC\"},\n }\n ]\n )\n + (\n [\n {\n \"type\": \"rgb\",\n \"img\": merge12(resize(guidance_eval_out[\"imgs_final\"])),\n \"kwargs\": {\"data_format\": \"HWC\"},\n }\n ]\n ),\n name=\"train_step\",\n step=self.true_global_step,\n texts=guidance_eval_out[\"texts\"],\n )" }, { "identifier": "binary_cross_entropy", "path": "threestudio/utils/ops.py", "snippet": "def binary_cross_entropy(input, target):\n \"\"\"\n F.binary_cross_entropy is not numerically stable in mixed-precision training.\n \"\"\"\n return -(target * torch.log(input) + (1 - target) * torch.log(1 - input)).mean()" }, { "identifier": "dot", "path": "threestudio/utils/ops.py", "snippet": "def dot(x, y):\n return torch.sum(x * y, -1, keepdim=True)" } ]
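On the `binary_cross_entropy` helper above: its docstring notes that `F.binary_cross_entropy` is numerically unstable in mixed precision, yet the hand-rolled version still returns NaN when `input` hits exactly 0 or 1. One common hardening, shown here as a sketch under the assumption that PyTorch is available (this is not the repo's actual code), is to clamp the probabilities by a small epsilon before taking logs:

```python
import torch

def binary_cross_entropy_stable(input, target, eps=1e-6):
    # Clamp away from {0, 1} so torch.log never sees an exact zero.
    input = input.clamp(min=eps, max=1.0 - eps)
    return -(target * torch.log(input)
             + (1 - target) * torch.log(1 - input)).mean()

pred = torch.tensor([0.0, 0.5, 1.0])  # the endpoints would otherwise yield NaN
tgt = torch.tensor([0.0, 1.0, 1.0])
print(binary_cross_entropy_stable(pred, tgt))  # finite scalar
```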
import os import random import shutil import torch import torch.nn.functional as F import threestudio from dataclasses import dataclass, field from PIL import Image, ImageDraw from torchmetrics import PearsonCorrCoef from threestudio.systems.base import BaseLift3DSystem from threestudio.utils.ops import binary_cross_entropy, dot from threestudio.utils.typing import *
3,217
if guidance == "ref": # bg_color = torch.rand_like(batch['rays_o']) ambient_ratio = 1.0 shading = "diffuse" batch["shading"] = shading elif guidance == "zero123": batch = batch["random_camera"] ambient_ratio = ( self.cfg.ambient_ratio_min + (1 - self.cfg.ambient_ratio_min) * random.random() ) batch["bg_color"] = None batch["ambient_ratio"] = ambient_ratio out = self(batch) loss_prefix = f"loss_{guidance}_" loss_terms = {} def set_loss(name, value): loss_terms[f"{loss_prefix}{name}"] = value guidance_eval = ( guidance == "zero123" and self.cfg.freq.guidance_eval > 0 and self.true_global_step % self.cfg.freq.guidance_eval == 0 ) if guidance == "ref": gt_mask = batch["mask"] gt_rgb = batch["rgb"] # color loss gt_rgb = gt_rgb * gt_mask.float() + out["comp_rgb_bg"] * ( 1 - gt_mask.float() ) set_loss("rgb", F.mse_loss(gt_rgb, out["comp_rgb"])) # mask loss set_loss("mask", F.mse_loss(gt_mask.float(), out["opacity"])) # depth loss if self.C(self.cfg.loss.lambda_depth) > 0: valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)].unsqueeze(1) valid_pred_depth = out["depth"][gt_mask].unsqueeze(1) with torch.no_grad(): A = torch.cat( [valid_gt_depth, torch.ones_like(valid_gt_depth)], dim=-1 ) # [B, 2] X = torch.linalg.lstsq(A, valid_pred_depth).solution # [2, 1] valid_gt_depth = A @ X # [B, 1] set_loss("depth", F.mse_loss(valid_gt_depth, valid_pred_depth)) # relative depth loss if self.C(self.cfg.loss.lambda_depth_rel) > 0: valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)] # [B,] valid_pred_depth = out["depth"][gt_mask] # [B,] set_loss( "depth_rel", 1 - self.pearson(valid_pred_depth, valid_gt_depth) ) # normal loss if self.C(self.cfg.loss.lambda_normal) > 0: valid_gt_normal = ( 1 - 2 * batch["ref_normal"][gt_mask.squeeze(-1)] ) # [B, 3] valid_pred_normal = ( 2 * out["comp_normal"][gt_mask.squeeze(-1)] - 1 ) # [B, 3] set_loss( "normal", 1 - F.cosine_similarity(valid_pred_normal, valid_gt_normal).mean(), ) elif guidance == "zero123": # zero123 guidance_out = self.guidance( out["comp_rgb"], **batch, rgb_as_latents=False, guidance_eval=guidance_eval, ) # claforte: TODO: rename the loss_terms keys set_loss("sds", guidance_out["loss_sds"]) if self.C(self.cfg.loss.lambda_normal_smooth) > 0: if "comp_normal" not in out: raise ValueError( "comp_normal is required for 2D normal smooth loss, no comp_normal is found in the output." ) normal = out["comp_normal"] set_loss( "normal_smooth", (normal[:, 1:, :, :] - normal[:, :-1, :, :]).square().mean() + (normal[:, :, 1:, :] - normal[:, :, :-1, :]).square().mean(), ) if self.C(self.cfg.loss.lambda_3d_normal_smooth) > 0: if "normal" not in out: raise ValueError( "Normal is required for normal smooth loss, no normal is found in the output." ) if "normal_perturb" not in out: raise ValueError( "normal_perturb is required for normal smooth loss, no normal_perturb is found in the output." ) normals = out["normal"] normals_perturb = out["normal_perturb"] set_loss("3d_normal_smooth", (normals - normals_perturb).abs().mean()) if not self.cfg.refinement: if self.C(self.cfg.loss.lambda_orient) > 0: if "normal" not in out: raise ValueError( "Normal is required for orientation loss, no normal is found in the output." ) set_loss( "orient", ( out["weights"].detach()
@threestudio.register("zero123-system") class Zero123(BaseLift3DSystem): @dataclass class Config(BaseLift3DSystem.Config): freq: dict = field(default_factory=dict) refinement: bool = False ambient_ratio_min: float = 0.5 cfg: Config def configure(self): # create geometry, material, background, renderer super().configure() def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]: render_out = self.renderer(**batch) return { **render_out, } def on_fit_start(self) -> None: super().on_fit_start() # no prompt processor self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance) # visualize all training images all_images = self.trainer.datamodule.train_dataloader().dataset.get_all_images() self.save_image_grid( "all_training_images.png", [ {"type": "rgb", "img": image, "kwargs": {"data_format": "HWC"}} for image in all_images ], name="on_fit_start", step=self.true_global_step, ) self.pearson = PearsonCorrCoef().to(self.device) def training_substep(self, batch, batch_idx, guidance: str): """ Args: guidance: one of "ref" (reference image supervision), "zero123" """ if guidance == "ref": # bg_color = torch.rand_like(batch['rays_o']) ambient_ratio = 1.0 shading = "diffuse" batch["shading"] = shading elif guidance == "zero123": batch = batch["random_camera"] ambient_ratio = ( self.cfg.ambient_ratio_min + (1 - self.cfg.ambient_ratio_min) * random.random() ) batch["bg_color"] = None batch["ambient_ratio"] = ambient_ratio out = self(batch) loss_prefix = f"loss_{guidance}_" loss_terms = {} def set_loss(name, value): loss_terms[f"{loss_prefix}{name}"] = value guidance_eval = ( guidance == "zero123" and self.cfg.freq.guidance_eval > 0 and self.true_global_step % self.cfg.freq.guidance_eval == 0 ) if guidance == "ref": gt_mask = batch["mask"] gt_rgb = batch["rgb"] # color loss gt_rgb = gt_rgb * gt_mask.float() + out["comp_rgb_bg"] * ( 1 - gt_mask.float() ) set_loss("rgb", F.mse_loss(gt_rgb, out["comp_rgb"])) # mask loss set_loss("mask", F.mse_loss(gt_mask.float(), out["opacity"])) # depth loss if self.C(self.cfg.loss.lambda_depth) > 0: valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)].unsqueeze(1) valid_pred_depth = out["depth"][gt_mask].unsqueeze(1) with torch.no_grad(): A = torch.cat( [valid_gt_depth, torch.ones_like(valid_gt_depth)], dim=-1 ) # [B, 2] X = torch.linalg.lstsq(A, valid_pred_depth).solution # [2, 1] valid_gt_depth = A @ X # [B, 1] set_loss("depth", F.mse_loss(valid_gt_depth, valid_pred_depth)) # relative depth loss if self.C(self.cfg.loss.lambda_depth_rel) > 0: valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)] # [B,] valid_pred_depth = out["depth"][gt_mask] # [B,] set_loss( "depth_rel", 1 - self.pearson(valid_pred_depth, valid_gt_depth) ) # normal loss if self.C(self.cfg.loss.lambda_normal) > 0: valid_gt_normal = ( 1 - 2 * batch["ref_normal"][gt_mask.squeeze(-1)] ) # [B, 3] valid_pred_normal = ( 2 * out["comp_normal"][gt_mask.squeeze(-1)] - 1 ) # [B, 3] set_loss( "normal", 1 - F.cosine_similarity(valid_pred_normal, valid_gt_normal).mean(), ) elif guidance == "zero123": # zero123 guidance_out = self.guidance( out["comp_rgb"], **batch, rgb_as_latents=False, guidance_eval=guidance_eval, ) # claforte: TODO: rename the loss_terms keys set_loss("sds", guidance_out["loss_sds"]) if self.C(self.cfg.loss.lambda_normal_smooth) > 0: if "comp_normal" not in out: raise ValueError( "comp_normal is required for 2D normal smooth loss, no comp_normal is found in the output." 
) normal = out["comp_normal"] set_loss( "normal_smooth", (normal[:, 1:, :, :] - normal[:, :-1, :, :]).square().mean() + (normal[:, :, 1:, :] - normal[:, :, :-1, :]).square().mean(), ) if self.C(self.cfg.loss.lambda_3d_normal_smooth) > 0: if "normal" not in out: raise ValueError( "Normal is required for normal smooth loss, no normal is found in the output." ) if "normal_perturb" not in out: raise ValueError( "normal_perturb is required for normal smooth loss, no normal_perturb is found in the output." ) normals = out["normal"] normals_perturb = out["normal_perturb"] set_loss("3d_normal_smooth", (normals - normals_perturb).abs().mean()) if not self.cfg.refinement: if self.C(self.cfg.loss.lambda_orient) > 0: if "normal" not in out: raise ValueError( "Normal is required for orientation loss, no normal is found in the output." ) set_loss( "orient", ( out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
2
2023-12-27 20:30:33+00:00
4k
jasursadikov/mud
commands.py
[ { "identifier": "TEXT", "path": "utils.py", "snippet": "TEXT = {\n 'white': '\\033[37m',\n 'gray': '\\033[90m',\n 'black': '\\033[30m',\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'bright_white': '\\033[97m',\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n}" }, { "identifier": "BACK", "path": "utils.py", "snippet": "BACK = {\n 'white': '\\033[47m',\n 'medium_gray': '\\033[100m',\n 'black': '\\033[40m',\n 'red': '\\033[41m',\n 'green': '\\033[42m',\n 'yellow': '\\033[43m',\n 'blue': '\\033[44m',\n 'magenta': '\\033[45m',\n 'cyan': '\\033[46m',\n 'bright_white': '\\033[107m',\n 'bright_red': '\\033[101m',\n 'bright_green': '\\033[102m',\n 'bright_yellow': '\\033[103m',\n 'bright_blue': '\\033[104m',\n 'bright_magenta': '\\033[105m',\n 'bright_cyan': '\\033[106m',\n}" }, { "identifier": "RESET", "path": "utils.py", "snippet": "RESET = '\\033[0m'" }, { "identifier": "STYLES", "path": "utils.py", "snippet": "STYLES = {\n 'bold': '\\033[1m',\n 'dim': '\\033[2m',\n 'italic': '\\033[3m',\n 'underline': '\\033[4m',\n 'blink': '\\033[5m',\n}" }, { "identifier": "END_STYLES", "path": "utils.py", "snippet": "END_STYLES = {\n 'bold': '\\033[22m',\n 'dim': '\\033[22m',\n 'italic': '\\033[23m',\n 'underline': '\\033[24m',\n 'blink': '\\033[25m',\n}" }, { "identifier": "glyph", "path": "utils.py", "snippet": "def glyph(key: str) -> str:\n return GLYPHS[key][0] if settings.mud_settings['nerd_fonts'] else GLYPHS[key][1]" } ]
import utils import asyncio import subprocess from utils import TEXT, BACK, RESET, STYLES, END_STYLES, glyph from typing import List, Dict from collections import Counter from prettytable import PrettyTable, PLAIN_COLUMNS
3,475
if not line: break line = line.decode().strip() line = table[repo_path][0] if not line.strip() else line table[repo_path] = [line, f'{TEXT["yellow"]}{glyph("running")}'] self._print_process(table) return_code = await process.wait() if return_code == 0: status = f'{TEXT["green"]}{glyph("finished")}' else: status = f'{TEXT["red"]}{glyph("failed")} Code: {return_code}' table[repo_path] = [table[repo_path][0], status] self._print_process(table) def _print_process(self, info: Dict[str, List[str]]) -> None: table = self._get_table() for path, (line, status) in info.items(): formatted_path = self._get_formatted_path(path) table.add_row([formatted_path, line, status]) print(f'\x1bc{self._table_to_str(table)}\n', end='') def _print_table(self, table: PrettyTable): table = self._table_to_str(table) if len(table) != 0: print(table) @staticmethod def _table_to_str(table: PrettyTable) -> str: table = table.get_string() table = '\n'.join(line.lstrip() for line in table.splitlines()) return table @staticmethod def _get_table() -> PrettyTable: return PrettyTable(border=False, header=False, style=PLAIN_COLUMNS, align='l') # Prettified repository path @staticmethod def _get_formatted_path(path: str) -> str: return f'{STYLES["dim"]}{TEXT["gray"]}../{RESET}{STYLES["dim"]}{path}{RESET}' # Displaying current branch @staticmethod def _get_branch_status(path: str) -> str: branch_cmd = subprocess.run('git rev-parse --abbrev-ref HEAD', shell=True, text=True, cwd=path, capture_output=True) branch_stdout = branch_cmd.stdout.strip() if branch_stdout == 'master' or branch_stdout == 'main': branch = f'{TEXT["yellow"]}{glyph("master")}{RESET} {branch_stdout}' elif branch_stdout == 'develop': branch = f'{TEXT["green"]}{glyph("feature")}{RESET} {branch_stdout}' elif '/' in branch_stdout: branch_path = branch_stdout.split('/') icon = branch_path[0] icon = f'{TEXT["red"]}{glyph("bugfix")}{RESET}' if icon in ['bugfix', 'bug', 'hotfix'] else \ f'{TEXT["blue"]}{glyph("release")}{RESET}' if icon == 'release' else \ f'{TEXT["green"]}{glyph("feature")}{RESET}' if icon in ['feature', 'feat', 'develop'] else \ f'{TEXT["green"]}{glyph("branch")}{RESET}' branch = f'{icon} {STYLES["bold"]}{branch_path[0]}{RESET}/{STYLES["bold"]}{("/".join(branch_path[1:]))}' else: branch = f'{TEXT["cyan"]}{glyph("branch")}{RESET} {branch_stdout}' return branch # Last author's name @staticmethod def _get_authors_name(path: str) -> str: cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%an'], text=True, cwd=path, capture_output=True) git_config_user_cmd = subprocess.run(['git', 'config', 'user.name'], text=True, capture_output=True) committer_color = '' if cmd.stdout.strip() == git_config_user_cmd.stdout.strip() else STYLES["dim"] author = cmd.stdout.strip() author = author[:20] + '...' if len(author) > 20 else author author = f'{committer_color}{author}{RESET}' return author # Last commit message @staticmethod def _get_commit_message(path: str, max_chars: int) -> str: cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%s'], text=True, cwd=path, capture_output=True) log = cmd.stdout.strip() log = log[:max_chars] + '...' 
if len(log) > max_chars else log return log def _get_formatted_labels(self, labels: List[str]) -> str: if len(labels) == 0: return '' colored_label = '' for label in labels: color_index = self._get_color_index(label) % len(TEXT) colored_label += f'{TEXT[list(TEXT.keys())[color_index + 3]]}{glyph("label")}{RESET} {label} ' return colored_label @staticmethod def _get_formatted_branches(branches: List[str], current_branch: str) -> str: if len(branches) == 0: return '' simplify_branches = utils.settings.config['mud'].getboolean('simplify_branches') == True output = '' for branch in branches: is_origin = branch.startswith('origin/') branch = branch.replace('origin/', '') if is_origin else branch current_prefix = f'{STYLES["italic"]}{STYLES["bold"]}' if current_branch == branch else '' current_prefix = current_prefix + STYLES['dim'] if is_origin else current_prefix origin_prefix = f'{TEXT["magenta"]}{STYLES["dim"]}o/' if is_origin else '' color = 'white' icon = glyph('branch') if branch == 'master' or branch == 'main': color = 'yellow' icon = f'{glyph("master")}' elif branch == 'develop': color = 'green' icon = f'{glyph("feature")}' elif '/' in branch: parts = branch.split('/')
class Commands: def __init__(self, repos): self.repos = repos self.label_color_cache = {} self.current_color_index = 0 # `mud status` command implementation def status(self, repos: Dict[str, List[str]]) -> None: table = self._get_table() for path, tags in repos.items(): formatted_path = self._get_formatted_path(path) branch = self._get_branch_status(path) author = self._get_authors_name(path) commit = self._get_commit_message(path, 30) colored_labels = self._get_formatted_labels(tags) # Sync with origin status ahead_behind_cmd = subprocess.run(['git', 'rev-list', '--left-right', '--count', 'HEAD...@{upstream}'], text=True, cwd=path, capture_output=True) stdout = ahead_behind_cmd.stdout.strip().split() if len(stdout) >= 2: ahead, behind = stdout[0], stdout[1] origin_sync = '' if ahead and ahead != '0': origin_sync += f'{TEXT["bright_green"]}{glyph("ahead")} {ahead}{RESET}' if behind and behind != '0': if origin_sync: origin_sync += ' ' origin_sync += f'{TEXT["bright_blue"]}{glyph("behind")} {behind}{RESET}' else: origin_sync = '' # Git status status_cmd = subprocess.run(['git', 'status', '-s'], text=True, cwd=path, capture_output=True) files = [line.lstrip() for line in status_cmd.stdout.strip().splitlines()] modified, added, removed, moved = 0, 0, 0, 0 for file in files: if file.startswith('M'): modified += 1 elif file.startswith('A') or file.startswith('??'): added += 1 elif file.startswith('D'): removed += 1 elif file.startswith('R'): moved += 1 status = '' if added: status += f'{TEXT["bright_green"]}{added} {glyph("added")}{RESET} ' if modified: status += f'{TEXT["yellow"]}{modified} {glyph("modified")}{RESET} ' if moved: status += f'{TEXT["blue"]}{moved} {glyph("moved")}{RESET} ' if removed: status += f'{TEXT["red"]}{removed} {glyph("removed")}{RESET} ' if not files: status = f'{TEXT["green"]}{glyph("clear")}{RESET}' table.add_row([formatted_path, branch, origin_sync, status, author, commit, colored_labels]) self._print_table(table) # `mud log` command implementation def log(self, repos: Dict[str, List[str]]) -> None: table = self._get_table() for path, labels in repos.items(): formatted_path = self._get_formatted_path(path) branch = self._get_branch_status(path) author = self._get_authors_name(path) commit = self._get_commit_message(path, 35) colored_labels = self._get_formatted_labels(labels) # Commit time commit_time_cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%cd', '--date=relative'], text=True, cwd=path, capture_output=True) commit_time = commit_time_cmd.stdout.strip() table.add_row([formatted_path, branch, author, commit_time, commit, colored_labels]) self._print_table(table) # `mud branch` command implementation def branches(self, repos: Dict[str, List[str]]) -> None: table = self._get_table() all_branches = {} for path in repos.keys(): raw_branches = [line.strip() for line in subprocess.check_output(['git', 'branch'], text=True, cwd=path).split('\n') if line.strip()] for branch in raw_branches: branch = branch.replace(' ', '').replace('*', '') if branch not in all_branches: all_branches[branch] = 0 all_branches[branch] += 1 branch_counter = Counter(all_branches) for path, labels in repos.items(): formatted_path = self._get_formatted_path(path) branches = subprocess.check_output(['git', 'branch'], text=True, cwd=path).splitlines() current_branch = next((branch.lstrip('* ') for branch in branches if branch.startswith('*')), None) branches = [branch.lstrip('* ') for branch in branches] sorted_branches = sorted(branches, key=lambda x: branch_counter.get(x, 0), 
reverse=True) if current_branch and current_branch in sorted_branches: sorted_branches.remove(current_branch) sorted_branches.insert(0, current_branch) formatted_branches = self._get_formatted_branches(sorted_branches, current_branch) colored_labels = self._get_formatted_labels(labels) table.add_row([formatted_path, formatted_branches, colored_labels]) self._print_table(table) # `mud <COMMAND>` when run_async = 0 and run_table = 0 def run_ordered(self, repos: List[str], command: [str]) -> None: for path in repos: print(f'{self._get_formatted_path(path)}{RESET} {command}{RESET}') result = subprocess.run(command, shell=True, cwd=path, capture_output=True, text=True) if result.stderr: print(result.stderr) if result.stdout and not result.stdout.isspace(): print(result.stdout) # `mud <COMMAND>` when run_async = 1 and run_table = 0 async def run_async(self, repos: List[str], command: str) -> None: sem = asyncio.Semaphore(len(repos)) async def run_process(path: str) -> None: async with sem: process = await asyncio.create_subprocess_shell(command, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = await process.communicate() print(f'{self._get_formatted_path(path)}>{RESET} {command}') if stderr: print(stderr.decode()) if stdout and not stdout.isspace(): print(stdout.decode()) await asyncio.gather(*(run_process(path) for path in repos)) # `mud <COMMAND>` when run_async = 1 and run_table = 1 async def run_async_table_view(self, repos: List[str], command: str) -> None: sem = asyncio.Semaphore(len(repos)) table = {repo: ['', ''] for repo in repos} async def task(repo: str) -> None: async with sem: await self._run_process(repo, table, command) tasks = [asyncio.create_task(task(repo)) for repo in repos] await asyncio.gather(*tasks) async def _run_process(self, repo_path: str, table: Dict[str, List[str]], command: str) -> None: process = await asyncio.create_subprocess_shell(command, cwd=repo_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE) while True: line = await process.stdout.readline() if not line: break line = line.decode().strip() line = table[repo_path][0] if not line.strip() else line table[repo_path] = [line, f'{TEXT["yellow"]}{glyph("running")}'] self._print_process(table) return_code = await process.wait() if return_code == 0: status = f'{TEXT["green"]}{glyph("finished")}' else: status = f'{TEXT["red"]}{glyph("failed")} Code: {return_code}' table[repo_path] = [table[repo_path][0], status] self._print_process(table) def _print_process(self, info: Dict[str, List[str]]) -> None: table = self._get_table() for path, (line, status) in info.items(): formatted_path = self._get_formatted_path(path) table.add_row([formatted_path, line, status]) print(f'\x1bc{self._table_to_str(table)}\n', end='') def _print_table(self, table: PrettyTable): table = self._table_to_str(table) if len(table) != 0: print(table) @staticmethod def _table_to_str(table: PrettyTable) -> str: table = table.get_string() table = '\n'.join(line.lstrip() for line in table.splitlines()) return table @staticmethod def _get_table() -> PrettyTable: return PrettyTable(border=False, header=False, style=PLAIN_COLUMNS, align='l') # Prettified repository path @staticmethod def _get_formatted_path(path: str) -> str: return f'{STYLES["dim"]}{TEXT["gray"]}../{RESET}{STYLES["dim"]}{path}{RESET}' # Displaying current branch @staticmethod def _get_branch_status(path: str) -> str: branch_cmd = subprocess.run('git rev-parse --abbrev-ref HEAD', shell=True, text=True, cwd=path, capture_output=True) branch_stdout = 
branch_cmd.stdout.strip() if branch_stdout == 'master' or branch_stdout == 'main': branch = f'{TEXT["yellow"]}{glyph("master")}{RESET} {branch_stdout}' elif branch_stdout == 'develop': branch = f'{TEXT["green"]}{glyph("feature")}{RESET} {branch_stdout}' elif '/' in branch_stdout: branch_path = branch_stdout.split('/') icon = branch_path[0] icon = f'{TEXT["red"]}{glyph("bugfix")}{RESET}' if icon in ['bugfix', 'bug', 'hotfix'] else \ f'{TEXT["blue"]}{glyph("release")}{RESET}' if icon == 'release' else \ f'{TEXT["green"]}{glyph("feature")}{RESET}' if icon in ['feature', 'feat', 'develop'] else \ f'{TEXT["green"]}{glyph("branch")}{RESET}' branch = f'{icon} {STYLES["bold"]}{branch_path[0]}{RESET}/{STYLES["bold"]}{("/".join(branch_path[1:]))}' else: branch = f'{TEXT["cyan"]}{glyph("branch")}{RESET} {branch_stdout}' return branch # Last author's name @staticmethod def _get_authors_name(path: str) -> str: cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%an'], text=True, cwd=path, capture_output=True) git_config_user_cmd = subprocess.run(['git', 'config', 'user.name'], text=True, capture_output=True) committer_color = '' if cmd.stdout.strip() == git_config_user_cmd.stdout.strip() else STYLES["dim"] author = cmd.stdout.strip() author = author[:20] + '...' if len(author) > 20 else author author = f'{committer_color}{author}{RESET}' return author # Last commit message @staticmethod def _get_commit_message(path: str, max_chars: int) -> str: cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%s'], text=True, cwd=path, capture_output=True) log = cmd.stdout.strip() log = log[:max_chars] + '...' if len(log) > max_chars else log return log def _get_formatted_labels(self, labels: List[str]) -> str: if len(labels) == 0: return '' colored_label = '' for label in labels: color_index = self._get_color_index(label) % len(TEXT) colored_label += f'{TEXT[list(TEXT.keys())[color_index + 3]]}{glyph("label")}{RESET} {label} ' return colored_label @staticmethod def _get_formatted_branches(branches: List[str], current_branch: str) -> str: if len(branches) == 0: return '' simplify_branches = utils.settings.config['mud'].getboolean('simplify_branches') == True output = '' for branch in branches: is_origin = branch.startswith('origin/') branch = branch.replace('origin/', '') if is_origin else branch current_prefix = f'{STYLES["italic"]}{STYLES["bold"]}' if current_branch == branch else '' current_prefix = current_prefix + STYLES['dim'] if is_origin else current_prefix origin_prefix = f'{TEXT["magenta"]}{STYLES["dim"]}o/' if is_origin else '' color = 'white' icon = glyph('branch') if branch == 'master' or branch == 'main': color = 'yellow' icon = f'{glyph("master")}' elif branch == 'develop': color = 'green' icon = f'{glyph("feature")}' elif '/' in branch: parts = branch.split('/')
end_dim = '' if is_origin else END_STYLES["dim"]
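The `branches` command above orders each repo's branch list by how often the branch name occurs across all repos (via `Counter`), then pins the current branch to the front. The same logic in isolation, on made-up branch lists:

```python
from collections import Counter

all_branches = ["main", "main", "develop", "feature/login", "main"]
branch_counter = Counter(all_branches)

branches = ["feature/login", "develop", "main"]
current_branch = "develop"

sorted_branches = sorted(branches,
                         key=lambda b: branch_counter.get(b, 0),
                         reverse=True)
if current_branch in sorted_branches:
    sorted_branches.remove(current_branch)
    sorted_branches.insert(0, current_branch)

print(sorted_branches)  # ['develop', 'main', 'feature/login']
```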
4
2023-12-28 13:09:31+00:00
4k
RaceCrewAI/gt-telem
gt_telem/models/telemetry.py
[ { "identifier": "format_time", "path": "gt_telem/models/helpers.py", "snippet": "def format_time(milliseconds):\n \"\"\"\n Format milliseconds into a time string (MM:SS.sss).\n\n Parameters:\n - milliseconds (int): Time in milliseconds.\n\n Returns:\n str: Formatted time string.\n \"\"\"\n milliseconds = max(0, int(milliseconds))\n minutes, milliseconds = divmod(milliseconds, 60000)\n seconds, milliseconds = divmod(milliseconds, 1000)\n return f\"{minutes:02}:{seconds:02}.{milliseconds:03}\"" }, { "identifier": "format_time_of_day", "path": "gt_telem/models/helpers.py", "snippet": "def format_time_of_day(milliseconds, use_24hr=False):\n \"\"\"\n Format milliseconds into a time of day string.\n\n Parameters:\n - milliseconds (int): Time in milliseconds.\n - use_24hr (bool): Flag indicating whether to use 24-hour format. Default is False.\n\n Returns:\n str: Formatted time of day string.\n \"\"\"\n milliseconds = max(0, int(milliseconds))\n hours, milliseconds = divmod(milliseconds, 3600000)\n minutes, milliseconds = divmod(milliseconds, 60000)\n seconds, milliseconds = divmod(milliseconds, 1000)\n\n if use_24hr:\n am_pm = \"AM\" if hours < 12 else \"PM\"\n hours = hours % 12 or 12\n return f\"{hours:02}:{minutes:02}:{seconds:02} {am_pm}\"\n else:\n return f\"{hours:02}:{minutes:02}:{seconds:02}\"" }, { "identifier": "Vector3D", "path": "gt_telem/models/models.py", "snippet": "" }, { "identifier": "TelemetryPacket", "path": "gt_telem/models/telemetry_packet.py", "snippet": "class TelemetryPacket:\n position_x: float\n position_y: float\n position_z: float\n velocity_x: float\n velocity_y: float\n velocity_z: float\n rotation_x: float\n rotation_y: float\n rotation_z: float\n orientation: float\n ang_vel_x: float\n ang_vel_y: float\n ang_vel_z: float\n body_height: float\n engine_rpm: float\n iv: float\n fuel_level: float\n fuel_capacity: float\n speed_mps: float\n boost_pressure: float\n oil_pressure: float\n water_temp: float\n oil_temp: float\n tire_fl_temp: float\n tire_fr_temp: float\n tire_rl_temp: float\n tire_rr_temp: float\n packet_id: int\n current_lap: int\n total_laps: int\n best_lap_time_ms: int\n last_lap_time_ms: int\n time_of_day_ms: int\n race_start_pos: int\n total_cars: int\n min_alert_rpm: int\n max_alert_rpm: int\n calc_max_speed: int\n flags: int\n bits: int\n throttle: int\n brake: int\n empty: int\n road_plane_x: float\n road_plane_y: float\n road_plane_z: float\n road_plane_dist: float\n wheel_fl_rps: float\n wheel_fr_rps: float\n wheel_rl_rps: float\n wheel_rr_rps: float\n tire_fl_radius: float\n tire_fr_radius: float\n tire_rl_radius: float\n tire_rr_radius: float\n tire_fl_sus_height: float\n tire_fr_sus_height: float\n tire_rl_sus_height: float\n tire_rr_sus_height: float\n unused1: int\n unused2: int\n unused3: int\n unused4: int\n unused5: int\n unused6: int\n unused7: int\n unused8: int\n clutch_pedal: float\n clutch_engagement: float\n trans_rpm: float\n trans_top_speed: float\n gear1: float\n gear2: float\n gear3: float\n gear4: float\n gear5: float\n gear6: float\n gear7: float\n gear8: float\n car_code: int" } ]
from datetime import datetime from gt_telem.models.helpers import format_time, format_time_of_day from gt_telem.models.models import Vector3D, WheelMetric from gt_telem.models.telemetry_packet import TelemetryPacket
3,293
@property def cars_on_track(self) -> bool: """ Check if there are cars on the track. """ return bool(1<<0 & self.flags) @property def is_paused(self) -> bool: """ Check if the simulation is paused. """ return bool(1<<1 & self.flags) @property def is_loading(self) -> bool: """ Check if the simulation is loading. """ return bool(1<<2 & self.flags) @property def in_gear(self) -> bool: """ Check if the vehicle is in gear. """ return bool(1<<3 & self.flags) @property def has_turbo(self) -> bool: """ Check if the vehicle has a turbo. """ return bool(1<<4 & self.flags) @property def rev_limit(self) -> bool: """ Check if the vehicle is at the rev limit. """ return bool(1<<5 & self.flags) @property def hand_brake_active(self) -> bool: """ Check if the hand brake is active. """ return bool(1<<6 & self.flags) @property def lights_active(self) -> bool: """ Check if the lights are active. """ return bool(1<<7 & self.flags) @property def high_beams(self) -> bool: """ Check if the high beams are active. """ return bool(1<<8 & self.flags) @property def low_beams(self) -> bool: """ Check if the low beams are active. """ return bool(1<<9 & self.flags) @property def asm_active(self) -> bool: """ Check if the ASM (Active Stability Management) is active. """ return bool(1<<10 & self.flags) @property def tcs_active(self) -> bool: """ Check if the TCS (Traction Control System) is active. """ return bool(1<<11 & self.flags) @property def unknown_bool_1(self) -> bool: """ Get the value of an unknown boolean flag. """ return bool(1<<12 & self.flags) @property def unknown_bool_2(self) -> bool: """ Not sure """ return bool(1<<13 & self.flags) @property def unknown_bool_3(self) -> bool: """ Get the value of another unknown boolean flag. """ return bool(1<<14 & self.flags) @property def unknown_bool_4(self) -> bool: """ Get the value of another unknown boolean flag. """ return bool(1<<15 & self.flags) @property def best_lap_time(self) -> str: """ Get the formatted best lap time. """ if self.best_lap_time_ms == -1: return None
class Telemetry(TelemetryPacket): """ Telemetry data from Gran Turismo Attributes: - position_x: float - X-coordinate of the position. - position_y: float - Y-coordinate of the position. - position_z: float - Z-coordinate of the position. - velocity_x: float - X-component of velocity. - velocity_y: float - Y-component of velocity. - velocity_z: float - Z-component of velocity. - rotation_x: float - X-component of rotation. - rotation_y: float - Y-component of rotation. - rotation_z: float - Z-component of rotation. - orientation: float - Orientation. - ang_vel_x: float - X-component of angular velocity. - ang_vel_y: float - Y-component of angular velocity. - ang_vel_z: float - Z-component of angular velocity. - body_height: float - Height of the body. - engine_rpm: float - Engine RPM. - iv: float - IV, used for encryption. - fuel_level: float - Fuel level. - fuel_capacity: float - Fuel capacity. - speed_mps: float - Speed in meters per second. - boost_pressure: float - Boost pressure. - oil_pressure: float - Oil pressure. - water_temp: float - Water temperature. - oil_temp: float - Oil temperature. - tire_fl_temp: float - Front-left tire temperature. - tire_fr_temp: float - Front-right tire temperature. - tire_rl_temp: float - Rear-left tire temperature. - tire_rr_temp: float - Rear-right tire temperature. - packet_id: int - Packet ID. - current_lap: int - Current lap. - total_laps: int - Total laps. - best_lap_time_ms: int - Best lap time in milliseconds. - last_lap_time_ms: int - Last lap time in milliseconds. - time_of_day_ms: int - Time of day in milliseconds. - race_start_pos: int - Race start position. - total_cars: int - Total number of cars. - min_alert_rpm: int - Minimum alert RPM. - max_alert_rpm: int - Maximum alert RPM. - calc_max_speed: int - Calculated maximum speed. - flags: int - Collection of boolean status flags - see properties. - bits: int - Byte that contains the current/suggested gear. - throttle: int - Throttle. - brake: int - Brake. - empty: int - Unused. - road_plane_x: float - X-coordinate of the road plane. - road_plane_y: float - Y-coordinate of the road plane. - road_plane_z: float - Z-coordinate of the road plane. - road_plane_dist: float - Distance of the road plane. Not sure what this is. - wheel_fl_rps: float - Front-left wheel revolutions per second. - wheel_fr_rps: float - Front-right wheel revolutions per second. - wheel_rl_rps: float - Rear-left wheel revolutions per second. - wheel_rr_rps: float - Rear-right wheel revolutions per second. - tire_fl_radius: float - Front-left tire radius. - tire_fr_radius: float - Front-right tire radius. - tire_rl_radius: float - Rear-left tire radius. - tire_rr_radius: float - Rear-right tire radius. - tire_fl_sus_height: float - Front-left tire suspension height. - tire_fr_sus_height: float - Front-right tire suspension height. - tire_rl_sus_height: float - Rear-left tire suspension height. - tire_rr_sus_height: float - Rear-right tire suspension height. - unused1: int - Unused variable 1. - unused2: int - Unused variable 2. - unused3: int - Unused variable 3. - unused4: int - Unused variable 4. - unused5: int - Unused variable 5. - unused6: int - Unused variable 6. - unused7: int - Unused variable 7. - unused8: int - Unused variable 8. - clutch_pedal: float - Clutch pedal position. - clutch_engagement: float - Clutch engagement. - trans_rpm: float - Transmission RPM. - trans_top_speed: float - Transmission top speed. - gear1: float - Gear 1. - gear2: float - Gear 2. - gear3: float - Gear 3. - gear4: float - Gear 4. 
- gear5: float - Gear 5. - gear6: float - Gear 6. - gear7: float - Gear 7. - gear8: float - Gear 8. - car_code: int - Car code - on vehicles with more than 8 gears, this is corrupted. Properties: - position: Get the position as a Vector3D. - velocity: Get the velocity as a Vector3D. - rotation: Get the rotation as a Vector3D. - angular_velocity: Get the angular velocity as a Vector3D. - road_plane: Get the road plane coordinates as a Vector3D. - tire_temp: Get tire temperatures as a WheelMetric. - wheel_rps: Get wheel revolutions per second as a WheelMetric. - tire_radius: Get tire radii as a WheelMetric. - suspension_height: Get suspension heights as a WheelMetric. - current_gear: Get the current gear. - suggested_gear: Get the suggested gear. - speed_kph: Get the speed in kilometers per hour. - speed_mph: Get the speed in miles per hour. - cars_on_track: Check if there are cars on the track. - is_paused: Check if the simulation is paused. - is_loading: Check if the simulation is loading. - in_gear: Check if the vehicle is in gear. - has_turbo: Check if the vehicle has a turbo. - rev_limit: Check if the vehicle is at the rev limit. - hand_brake_active: Check if the hand brake is active. - lights_active: Check if the lights are active. - high_beams: Check if the high beams are active. - low_beams: Check if the low beams are active. - asm_active: Check if the ASM (Active Stability Management) is active. - tcs_active: Check if the TCS (Traction Control System) is active. - unknown_bool_1: Purpose unknown. - unknown_bool_2: Purpose unknown. - unknown_bool_3: Purpose unknown. - unknown_bool_4: Purpose unknown. - best_lap_time: Get the formatted best lap time. - last_lap_time: Get the formatted last lap time. - time_of_day: Get the formatted time of day. Methods - as_dict: Get the state of the object in a dictionary format. """ def __post_init__(self): self.time = datetime.now() @property def position(self) -> Vector3D: """ Get the position as a Vector3D. """ return Vector3D(self.position_x, self.position_y, self.position_z) @property def velocity(self) -> Vector3D: """ Get the velocity as a Vector3D. """ return Vector3D(self.velocity_x, self.velocity_y, self.velocity_z) @property def rotation(self) -> Vector3D: """ Get the rotation as a Vector3D. """ return Vector3D(self.rotation_x, self.rotation_y, self.rotation_z) @property def angular_velocity(self) -> Vector3D: """ Get the angular velocity as a Vector3D. """ return Vector3D(self.ang_vel_x, self.ang_vel_y, self.ang_vel_z) @property def road_plane(self) -> Vector3D: """ Get the road plane coordinates as a Vector3D. """ return Vector3D(self.road_plane_x, self.road_plane_y, self.road_plane_z) @property def tire_temp(self) -> WheelMetric: """ Get tire temperatures as a WheelMetric. """ return WheelMetric( self.tire_fl_temp, self.tire_fr_temp, self.tire_rl_temp, self.tire_rr_temp ) @property def wheel_rps(self) -> WheelMetric: """ Get wheel revolutions per second as a WheelMetric. """ return WheelMetric( self.wheel_fl_rps, self.wheel_fr_rps, self.wheel_rl_rps, self.wheel_rr_rps ) @property def tire_radius(self) -> WheelMetric: """ Get tire radii as a WheelMetric. """ return WheelMetric( self.tire_fl_radius, self.tire_fr_radius, self.tire_rl_radius, self.tire_rr_radius, ) @property def suspension_height(self) -> WheelMetric: """ Get suspension heights as a WheelMetric. 
""" return WheelMetric( self.tire_fl_sus_height, self.tire_fr_sus_height, self.tire_rl_sus_height, self.tire_rr_sus_height, ) @property def current_gear(self) -> int: """ Get the current gear. """ return self.bits & 0b1111 @property def suggested_gear(self) -> int: """ Get the suggested gear. """ return self.bits >> 4 @property def speed_kph(self) -> float: """ Get the speed in kilometers per hour. """ return self.speed_mps * 3.6 @property def speed_mph(self) -> float: """ Get the speed in miles per hour. """ return self.speed_mps * 2.23694 @property def cars_on_track(self) -> bool: """ Check if there are cars on the track. """ return bool(1<<0 & self.flags) @property def is_paused(self) -> bool: """ Check if the simulation is paused. """ return bool(1<<1 & self.flags) @property def is_loading(self) -> bool: """ Check if the simulation is loading. """ return bool(1<<2 & self.flags) @property def in_gear(self) -> bool: """ Check if the vehicle is in gear. """ return bool(1<<3 & self.flags) @property def has_turbo(self) -> bool: """ Check if the vehicle has a turbo. """ return bool(1<<4 & self.flags) @property def rev_limit(self) -> bool: """ Check if the vehicle is at the rev limit. """ return bool(1<<5 & self.flags) @property def hand_brake_active(self) -> bool: """ Check if the hand brake is active. """ return bool(1<<6 & self.flags) @property def lights_active(self) -> bool: """ Check if the lights are active. """ return bool(1<<7 & self.flags) @property def high_beams(self) -> bool: """ Check if the high beams are active. """ return bool(1<<8 & self.flags) @property def low_beams(self) -> bool: """ Check if the low beams are active. """ return bool(1<<9 & self.flags) @property def asm_active(self) -> bool: """ Check if the ASM (Active Stability Management) is active. """ return bool(1<<10 & self.flags) @property def tcs_active(self) -> bool: """ Check if the TCS (Traction Control System) is active. """ return bool(1<<11 & self.flags) @property def unknown_bool_1(self) -> bool: """ Get the value of an unknown boolean flag. """ return bool(1<<12 & self.flags) @property def unknown_bool_2(self) -> bool: """ Not sure """ return bool(1<<13 & self.flags) @property def unknown_bool_3(self) -> bool: """ Get the value of another unknown boolean flag. """ return bool(1<<14 & self.flags) @property def unknown_bool_4(self) -> bool: """ Get the value of another unknown boolean flag. """ return bool(1<<15 & self.flags) @property def best_lap_time(self) -> str: """ Get the formatted best lap time. """ if self.best_lap_time_ms == -1: return None
return format_time(self.best_lap_time_ms)
0
2023-12-23 03:37:54+00:00
4k
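The Telemetry record above relies on two small bitwise conventions: each boolean property masks one bit of the packed flags word (bool(1 << n & flags)), and the gear byte keeps the current gear in its low nibble and the suggested gear in its high nibble (bits & 0b1111 and bits >> 4). A minimal stand-alone sketch of that decoding pattern follows; the flag names and sample values are illustrative assumptions, not the real packet layout.

# Sketch of the bit-flag and nibble decoding used by the Telemetry properties above.
# FLAG_NAMES and the sample inputs are assumptions for illustration only.
FLAG_NAMES = [
    "cars_on_track", "is_paused", "is_loading", "in_gear",
    "has_turbo", "rev_limit", "hand_brake_active", "lights_active",
]

def decode_flags(flags: int) -> dict:
    # Bit n of the packed word becomes the boolean named FLAG_NAMES[n].
    return {name: bool(flags & (1 << n)) for n, name in enumerate(FLAG_NAMES)}

def decode_gears(bits: int) -> tuple:
    # Low nibble: current gear; high nibble: suggested gear.
    return bits & 0b1111, bits >> 4

print(decode_flags(0b0101_0011))  # bits 0, 1, 4 and 6 set -> those four names map to True
print(decode_gears(0x43))         # (3, 4): in 3rd gear, 4th suggested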
Cl0udG0d/GPTHack
gui/server/background.py
[ { "identifier": "ChatGpt", "path": "core/chatgpt_web/chatgpt.py", "snippet": "class ChatGpt():\n session_data = {}\n\n @classmethod\n def get_hi_data(cls):\n return {\"prompt\": \"hi\", \"options\": {},\n \"systemMessage\": \"你是ChatGPT,一个由OpenAI训练的大型语言模型。尽可能详细而准确地回答我们提出的问题 谢谢\\n\"}\n\n @classmethod\n def get_empty_data(cls, prompt):\n return {\"prompt\": prompt, \"options\": {},\n \"systemMessage\": \"你是ChatGPT,一个由OpenAI训练的大型语言模型。尽可能详细而准确地回答我们提出的问题 谢谢\\n\"}\n\n @classmethod\n def get_version_data(cls):\n return {\"prompt\": \"你的GPT版本是多少 请详细回答\", \"options\": {},\n \"systemMessage\": \"你是ChatGPT,一个由OpenAI训练的大型语言模型。尽可能详细而准确地回答我们提出的问题 谢谢\\n\"}\n\n @classmethod\n def get_format_data(cls, prompt, parentMessageId):\n return {\"prompt\": prompt,\n \"options\": {\n \"parentMessageId\": parentMessageId\n },\n \"systemMessage\": \"你是ChatGPT,一个由OpenAI训练的大型语言模型。尽可能详细而准确地回答我们提出的问题 谢谢\\n\"}\n\n @classmethod\n def get_response(cls, url):\n try:\n response = requests.post(f\"{url}/api/chat-process\", json=cls.get_empty_data(\"hi\"),\n timeout=config.POST_TIMEOUT, verify=False)\n response.raise_for_status()\n last_line = None\n for line in response.iter_lines():\n\n if line:\n # 进行其他操作或处理逻辑\n last_line = line\n return last_line\n except Exception as e:\n if config.DEBUG:\n print(e)\n return None\n\n @classmethod\n def get_parentMessageId(cls,\n conversation_id: str):\n if cls.session_data.get(conversation_id):\n return cls.session_data.get(conversation_id)\n return \"\"\n\n @classmethod\n def get_new_gpt_site(cls):\n # print(config.ROOT_DIRECTORY)\n # print(config.GPT_FILEPATH)\n with open(config.GPT_FILEPATH, \"r\") as file:\n first_line = file.readline()\n\n if first_line:\n return first_line.split('|')[0]\n return \"\"\n\n @classmethod\n def create_completion(\n cls,\n conversation_id: str,\n messages: Messages,\n stream: bool,\n **kwargs\n ) -> CreateResult:\n url = cls.get_new_gpt_site()\n prompt = messages[-1][\"content\"]\n parentMessageId = cls.get_parentMessageId(conversation_id)\n data = cls.get_format_data(prompt, parentMessageId)\n if config.DEBUG:\n print(data)\n session = requests.Session()\n last_line = None\n with session.post(f\"{url}/api/chat-process\", json=data, stream=stream) as response:\n response.raise_for_status()\n for line in response.iter_lines():\n if line == b\"<script>\":\n raise RuntimeError(\"Solve challenge and pass cookies\")\n\n if b\"platform's risk control\" in line:\n raise RuntimeError(\"Platform's Risk Control\")\n\n if line:\n # 进行其他操作或处理逻辑\n last_line = line\n data = json.loads(line)\n result = data['delta'] if 'delta' in data else \"\"\n yield result\n line = json.loads(last_line)\n cls.session_data[conversation_id] = line['id']\n\n @classmethod\n def create(cls,\n conversation_id: str,\n messages: Messages,\n stream: bool,\n **kwargs) -> Union[CreateResult, str]:\n result = cls.create_completion(\n conversation_id=conversation_id,\n messages=messages,\n stream=True\n )\n return result if stream else ''.join(result)\n\n @classmethod\n def check_alive(cls,\n url):\n session = requests.Session()\n start_time = time.time() # 记录开始时间\n try:\n with session.post(f\"{url}/api/chat-process\", json=cls.get_hi_data(), stream=True, verify=False,\n timeout=config.POST_TIMEOUT) as response:\n response.raise_for_status()\n\n for line in response.iter_lines():\n last_line = None\n\n if line:\n # 进行其他操作或处理逻辑\n last_line = line\n\n # 在循环结束后处理最后的流数据\n end_time = time.time()\n execution_time = end_time - start_time # 计算执行时间\n if last_line is not None:\n data = 
json.loads(last_line)\n # print(data)\n if 'text' in data and data['text'] != \"\" and (\n data['text'].startswith(\"H\") or data['text'].startswith(\"h\"))\\\n and 'delta' in data:\n return True, execution_time\n else:\n return False, config.POST_TIMEOUT\n except Exception as e:\n print(e)\n return False, config.POST_TIMEOUT\n\n @classmethod\n def check_gpt_version(cls,\n url):\n session = requests.Session()\n try:\n with session.post(f\"{url}/api/chat-process\", json=cls.get_version_data(), stream=True, verify=False,\n timeout=config.POST_TIMEOUT) as response:\n response.raise_for_status()\n\n for line in response.iter_lines():\n last_line = None\n\n if line:\n # 进行其他操作或处理逻辑\n last_line = line\n\n if last_line is not None:\n data = json.loads(last_line)\n # print(data)\n if 'text' in data and data['text'] != \"\" and \"3.5\" in data['text']:\n if config.DEBUG:\n print(f\"site {url} gpt is 3.5\")\n return True\n else:\n return False\n except Exception as e:\n print(e)\n return False" }, { "identifier": "get_file_line_count", "path": "core/toolkit.py", "snippet": "def get_file_line_count():\n if not os.path.exists(config.GPT_FILEPATH):\n return 0\n with open(config.GPT_FILEPATH, \"r\") as file:\n line_count = sum(1 for line in file)\n return line_count" }, { "identifier": "sort_gpt_sitelist_from_list", "path": "core/toolkit.py", "snippet": "def sort_gpt_sitelist_from_list(urllist):\n with open(config.FILENAME, \"r+\") as file:\n # 将每行的数据拆分成元组 (line, number),并按照数字进行排序\n sorted_lines = sorted(urllist, key=lambda x: float(x.split(\"|\")[1]))\n\n # 清空文件内容\n file.seek(0)\n file.truncate()\n\n # 写入排序结果到文件\n for line in sorted_lines:\n file.write(f\"{line}\")" }, { "identifier": "set_new_gpt_site", "path": "core/toolkit.py", "snippet": "def set_new_gpt_site():\n print('GPT地址重置')\n for sitelist in get_gpt_site():\n submit_thread_task(sitelist)\n sort_gpt_sitelist()" } ]
from apscheduler.schedulers.background import BackgroundScheduler from core.chatgpt_web.chatgpt import ChatGpt from core.toolkit import get_file_line_count, sort_gpt_sitelist_from_list, set_new_gpt_site import ping3 import config import concurrent.futures import threading
2,169
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2023/12/27 18:02 # @Author : Cl0udG0d # @File : background.py # @Github: https://github.com/Cl0udG0d scheduler = BackgroundScheduler() temp_site_list = list() lock = threading.Lock() def check_site_num(): return get_file_line_count() > config.GPT_ALARM_NUM def save_site2list(site): alive, execution_time = ChatGpt.check_alive(site) if alive: with lock: temp_site_list.append(f"{site}|{execution_time}\n") def submit_thread_task(sitelist): with concurrent.futures.ThreadPoolExecutor(max_workers=config.THREADPOOL_NUM) as executor: futures = [executor.submit(save_site2list, site) for site in sitelist] concurrent.futures.wait(futures) def is_connected(host=config.TEST_CONNECT_URL): return True if ping3.ping(host) else False def check_gpt_alive(): global temp_site_list urllist = list() with open(config.GPT_FILEPATH, "r") as file: lines = file.readlines() for line in lines: url = line.strip().split('|')[0] urllist.append(url) submit_thread_task(urllist)
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2023/12/27 18:02 # @Author : Cl0udG0d # @File : background.py # @Github: https://github.com/Cl0udG0d scheduler = BackgroundScheduler() temp_site_list = list() lock = threading.Lock() def check_site_num(): return get_file_line_count() > config.GPT_ALARM_NUM def save_site2list(site): alive, execution_time = ChatGpt.check_alive(site) if alive: with lock: temp_site_list.append(f"{site}|{execution_time}\n") def submit_thread_task(sitelist): with concurrent.futures.ThreadPoolExecutor(max_workers=config.THREADPOOL_NUM) as executor: futures = [executor.submit(save_site2list, site) for site in sitelist] concurrent.futures.wait(futures) def is_connected(host=config.TEST_CONNECT_URL): return True if ping3.ping(host) else False def check_gpt_alive(): global temp_site_list urllist = list() with open(config.GPT_FILEPATH, "r") as file: lines = file.readlines() for line in lines: url = line.strip().split('|')[0] urllist.append(url) submit_thread_task(urllist)
sort_gpt_sitelist_from_list(temp_site_list)
2
2023-12-26 02:44:48+00:00
4k
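The background.py record above probes many candidate sites concurrently: a thread pool fans out liveness checks, live sites are appended to a shared list under a lock, and the list is later sorted by measured response time. A stripped-down sketch of that pattern, where probe stands in for ChatGpt.check_alive and the URLs and worker count are made-up placeholders:

import concurrent.futures
import threading
import time

results = []
lock = threading.Lock()

def probe(site: str) -> None:
    start = time.time()
    alive = site.startswith("https://")  # placeholder for a real liveness request
    if alive:
        with lock:  # guard the shared list, as save_site2list does above
            results.append(f"{site}|{time.time() - start:.3f}\n")

def probe_all(sites, max_workers=8):
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        concurrent.futures.wait([executor.submit(probe, site) for site in sites])

probe_all(["https://a.example", "http://b.example", "https://c.example"])
# Sort survivors by their recorded response time, mirroring sort_gpt_sitelist_from_list.
print(sorted(results, key=lambda line: float(line.split("|")[1])))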
DidacticFishstick/ultrastar-wingman
main.py
[ { "identifier": "Song", "path": "song.py", "snippet": "class Song:\n songs = {}\n usdb_ids = set()\n php_session_id = None\n\n @staticmethod\n def create_valid_dir_name(s):\n # Remove invalid characters\n s = re.sub(r'[<>:\"/\\\\|?*]', '', s)\n\n # Replace spaces with underscores\n # s = s.replace(' ', '_')\n\n # Truncate to a reasonable length to avoid exceeding max path lengths\n s = s[:255]\n\n # Ensure the name isn't a reserved name in Windows\n reserved_names = [\"CON\", \"PRN\", \"AUX\", \"NUL\", \"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\", \"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\"]\n if s.upper() in reserved_names:\n s = \"_\" + s\n\n return s\n\n @classmethod\n def load_songs(cls):\n for subdir in os.listdir(config.usdx_songs_dir):\n subdir_path = os.path.join(config.usdx_songs_dir, subdir)\n\n if not os.path.isdir(subdir_path):\n continue\n\n try:\n usdb_id = None\n if os.path.isfile(os.path.join(subdir_path, \"usdb_data.json\")):\n with open(os.path.join(subdir_path, \"usdb_data.json\")) as file:\n usdb_data = json.loads(file.read())\n usdb_id = usdb_data.get(\"id\")\n\n txt_files = [f for f in os.listdir(subdir_path) if f.endswith('.txt')]\n\n if not txt_files:\n continue\n\n try:\n txt_path = os.path.join(subdir_path, txt_files[0])\n\n with open(txt_path, 'rb') as file:\n encoding = chardet.detect(file.read())['encoding']\n\n if encoding != 'utf-8':\n logging.warning(f\"Wrong encoding. Is {encoding} instead of utf-8 for '{os.path.join(subdir_path, txt_files[0])}'\")\n\n with open(txt_path, 'r', encoding=encoding) as file:\n txt = file.read()\n\n match = re.search(r'#TITLE:(.*)\\n', txt)\n if match:\n title = match.group(1)\n else:\n logging.warning(f\"No title for {subdir_path}\")\n continue\n\n match = re.search(r'#ARTIST:(.*)\\n', txt)\n if match:\n artist = match.group(1)\n else:\n logging.warning(f\"No artist for {subdir_path}\")\n continue\n\n match = re.search(r'#COVER:(.*)\\n', txt)\n cover = None\n if match:\n cover = match.group(1)\n\n match = re.search(r'#MP3:(.*)\\n', txt)\n mp3 = None\n if match:\n mp3 = match.group(1)\n\n cls(subdir_path, title, artist, usdb_id, cover, mp3)\n except:\n logging.exception(f\"Could not process song in '{subdir_path}'\")\n except:\n logging.exception(f\"Could not process song in '{subdir_path}'\")\n\n @classmethod\n async def download(cls, id):\n response = usdb.session.post(f\"https://usdb.animux.de/index.php?link=gettxt&id={id}\", headers={\"Cookie\": cls.php_session_id}, data={\"wd\": \"1\"})\n response.raise_for_status()\n\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Extract the value of the input element with the name \"txt\"\n input_element = soup.find('input', {'name': 'txt'})\n\n if input_element:\n txt = input_element['value'].replace(\"\\r\\n\", \"\\n\")\n else:\n raise DownloadException(f\"txt for {id} not found on usdb.animux.de. 
Are you logged in?\")\n\n # TODO: get only the id, load everything here\n match = re.search(r'#TITLE:(.*)\\n', txt)\n if match:\n title = match.group(1)\n else:\n raise DownloadException(\"missing name\")\n\n match = re.search(r'#ARTIST:(.*)\\n', txt)\n if match:\n artist = match.group(1)\n else:\n raise DownloadException(\"missing artist\")\n\n match = re.search(r'#VIDEO:(.*)\\n', txt)\n if match:\n video = match.group(1)\n else:\n raise DownloadException(\"missing video\")\n\n if id is None:\n sanitized_name = cls.create_valid_dir_name(f\"{artist} - {title}\")\n else:\n sanitized_name = cls.create_valid_dir_name(f\"{artist} - {title} ({id})\")\n\n directory = os.path.join(config.usdx_songs_dir, sanitized_name)\n\n if os.path.exists(directory):\n raise DownloadException(f\"directory '{directory}' exists\")\n\n logging.info(f\"Saving {artist} - {title} ({id}) to {directory}\")\n\n with tempfile.TemporaryDirectory() as tempdir:\n with open(os.path.join(tempdir, f\"usdb_data.json\"), \"w+\") as file:\n file.write(json.dumps({\n \"id\": id\n }))\n\n with open(os.path.join(tempdir, f\"{sanitized_name}.txt\"), \"w+\") as file:\n file.writelines(\"#VIDEO:video.mp4\\n\")\n file.writelines(\"#MP3:song.mp3\\n\")\n file.writelines(\"#COVER:cover.jpg\\n\")\n # TODO: Background\n # file.writelines(\"#BACKGROUND:background.jpg\\n\")\n for line in txt.split(\"\\n\"):\n if not any(line.startswith(s) for s in [\"#VIDEO\", \"#MP3\", \"#COVER\", \"#BACKGROUND\"]):\n file.writelines(line + \"\\n\")\n\n match = re.search(r'[va]=([a-zA-Z0-9_-]+)', video)\n if match:\n url = f\"https://www.youtube.com/watch?v={match.group(1)}\"\n else:\n raise DownloadException(f\"no video url found in txt\")\n\n process = await asyncio.create_subprocess_exec(\n \"curl\", \"-o\", \"cover.jpg\", f\"https://usdb.animux.de/data/cover/{id}.jpg\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=tempdir\n )\n stdout, stderr = await process.communicate()\n\n if process.returncode != 0:\n raise DownloadException(f\"cover download failed with code {process.returncode}, stdout: {stdout.decode()}, stderr: {stderr.decode()}\")\n\n # try:\n # subprocess.run([\"curl\", \"-o\", \"cover.jpg\", f\"https://usdb.animux.de/data/cover/{id}.jpg\"], cwd=tempdir, check=True)\n # except Exception as e:\n # raise DownloadException(f\"cover download failed: {e}\")\n\n process = await asyncio.create_subprocess_exec(\n config.youtube_dl, \"-o\", \"video.mp4\", \"--format\", \"mp4\", url,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=tempdir\n )\n stdout, stderr = await process.communicate()\n\n if process.returncode != 0:\n raise DownloadException(f\"youtube-dl failed with code {process.returncode}, stdout: {stdout.decode()}, stderr: {stderr.decode()}\")\n\n # try:\n # subprocess.run([config.youtube_dl, \"-o\", \"video.mp4\", \"--format\", \"mp4\", url], cwd=tempdir, check=True)\n # except Exception as e:\n # raise DownloadException(f\"youtube-dl failed: {e}\")\n\n process = await asyncio.create_subprocess_exec(\n config.ffmpeg, \"-i\", \"video.mp4\", \"-vn\", \"-acodec\", \"libmp3lame\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-ar\", \"48000\", \"song.mp3\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=tempdir\n )\n stdout, stderr = await process.communicate()\n\n if process.returncode != 0:\n raise DownloadException(f\"ffmpeg failed with code {process.returncode}, stdout: {stdout.decode()}, stderr: {stderr.decode()}\")\n\n # try:\n # subprocess.run([config.ffmpeg, \"-i\", 
\"video.mp4\", \"-vn\", \"-acodec\", \"libmp3lame\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-ar\", \"48000\", \"song.mp3\"], cwd=tempdir, check=True)\n # except Exception as e:\n # raise DownloadException(f\"ffmpeg failed: {e}\")\n\n os.makedirs(directory)\n\n for file_name in os.listdir(tempdir):\n source = os.path.join(tempdir, file_name)\n destination = os.path.join(directory, file_name)\n shutil.move(source, destination)\n\n return cls(directory, title, artist, id, \"cover.jpg\", \"song.mp3\")\n\n @classmethod\n def song_list(cls) -> List[dict]:\n return [s.to_json() for s in cls.songs.values()]\n\n @classmethod\n def get_song_by_id(cls, id) -> 'Song':\n return cls.songs.get(str(id))\n\n @staticmethod\n def get_mp3_length(filename):\n audiofile = eyed3.load(filename)\n duration = audiofile.info.time_secs\n return duration\n\n def __init__(self, directory: str, title: str, artist: str, usdb_id: Optional[str] = None, cover: Optional[str] = None, mp3: Optional[str] = None):\n \"\"\"\n Creates a new song from the information found in the directory\n\n :param directory: The directory to the song directory\n :param title: The song title\n :param artist: The artist\n :param usdb_id: An optional ID of the song on usdb.animux.de/\n \"\"\"\n\n self.directory = directory\n self.title = title\n self.artist = artist\n self.usdb_id = usdb_id\n self.cover = cover\n self.mp3 = mp3\n self.duration = self.get_mp3_length(os.path.join(directory, mp3))\n\n if cover:\n self.cover_path = os.path.join(directory, cover)\n else:\n self.cover_path = None\n\n self.id = usdb_id or uuid.uuid4().hex\n\n self.songs[str(self.id)] = self\n\n if usdb_id is not None:\n self.usdb_ids.add(usdb_id)\n\n def __str__(self):\n if self.usdb_id is not None:\n return f\"{self.title} - {self.artist} ({self.usdb_id})\"\n return f\"{self.title} - {self.artist}\"\n\n def __repr__(self):\n if self.usdb_id is not None:\n return f\"[Song '{self.title} - {self.artist}' ({self.usdb_id})]\"\n return f\"[Song '{self.title} - {self.artist}']\"\n\n def to_json(self):\n return {\n \"directory\": self.directory,\n \"title\": self.title,\n \"artist\": self.artist,\n \"usdb_id\": self.usdb_id,\n \"id\": self.id,\n \"duration\": self.duration\n }" }, { "identifier": "WebSocketServer", "path": "websocket_server.py", "snippet": "class WebSocketServer:\n def __init__(self, download_queue: asyncio.Queue):\n async def register(self, websocket):\n async def unregister(self, websocket):\n async def send_to_clients(self, message):\n async def handler(self, websocket, path):\n async def message_queue_consumer(self):\n async def download_queue_consumer(self, i):" } ]
import getpass import os import asyncio import json import logging import os.path import platform import signal import subprocess import threading import websockets import config import usdb import usdx from flask import render_template, Flask, request, send_file from song import Song from websocket_server import WebSocketServer, messages
3,108
SCRIPT_BASE_PATH = os.path.abspath(os.path.dirname(__file__)) app = Flask(__name__, static_folder=os.path.join(SCRIPT_BASE_PATH, "static"), template_folder=os.path.join(SCRIPT_BASE_PATH, "templates")) usdx_process = None download_queue = asyncio.Queue() event_loop = asyncio.get_event_loop() php_session_id = None def restart_usdx(): global usdx_process if usdx_process is not None: logging.info("Stopping USDX") if platform.system() == "Windows": subprocess.call(['taskkill', '/F', '/T', '/PID', str(usdx_process.pid)]) else: os.kill(usdx_process.pid, signal.SIGKILL) logging.info("Starting USDX") usdx_process = subprocess.Popen(str(config.usdx_path)) @app.route('/') def index():
SCRIPT_BASE_PATH = os.path.abspath(os.path.dirname(__file__)) app = Flask(__name__, static_folder=os.path.join(SCRIPT_BASE_PATH, "static"), template_folder=os.path.join(SCRIPT_BASE_PATH, "templates")) usdx_process = None download_queue = asyncio.Queue() event_loop = asyncio.get_event_loop() php_session_id = None def restart_usdx(): global usdx_process if usdx_process is not None: logging.info("Stopping USDX") if platform.system() == "Windows": subprocess.call(['taskkill', '/F', '/T', '/PID', str(usdx_process.pid)]) else: os.kill(usdx_process.pid, signal.SIGKILL) logging.info("Starting USDX") usdx_process = subprocess.Popen(str(config.usdx_path)) @app.route('/') def index():
return render_template('index.html', messages=messages)
1
2023-12-23 15:29:44+00:00
4k
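restart_usdx in the record above stops the game differently per platform: on Windows it shells out to taskkill (/T takes down the whole process tree, /F forces termination), elsewhere it sends SIGKILL directly. A small sketch of just that branch; the spawned sleep command is a POSIX-only placeholder for the real game process:

import os
import platform
import signal
import subprocess

def stop_process_tree(process: subprocess.Popen) -> None:
    if platform.system() == "Windows":
        # /T ends the whole process tree, /F forces termination
        subprocess.call(["taskkill", "/F", "/T", "/PID", str(process.pid)])
    else:
        os.kill(process.pid, signal.SIGKILL)

proc = subprocess.Popen(["sleep", "60"])  # placeholder long-running child (POSIX)
stop_process_tree(proc)
proc.wait()  # reap the terminated child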
Q-MM/PureMM
eval/model_vqa.py
[ { "identifier": "conv_templates", "path": "model/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "tokenizer_image_token", "path": "model/mm_utils.py", "snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids" }, { "identifier": "process_images", "path": "model/mm_utils.py", "snippet": "def process_images(images, image_processor, model_cfg):\n image_aspect_ratio = getattr(model_cfg, \"image_aspect_ratio\", None)\n new_images = []\n if image_aspect_ratio == 'pad':\n for image in images:\n image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))\n image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n else:\n return image_processor(images, return_tensors='pt')['pixel_values']\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images" }, { "identifier": "get_model_name_from_path", "path": "model/mm_utils.py", "snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]" } ]
import argparse import torch import os import json import math import logging import warnings from tqdm import tqdm from model.conversation import conv_templates, SeparatorStyle from model.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path from PIL import Image from transformers import AutoTokenizer, AutoConfig, BitsAndBytesConfig from ..model import * from peft import PeftModel
2,438
if 'lora' in model_name.lower() and model_base is None: warnings.warn( 'There is `lora` in model name but no `model_base` is provided. ') if 'lora' in model_name.lower() and model_base is not None: lora_cfg_pretrained = AutoConfig.from_pretrained(model_path) tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) print('Loading PureMM from base model...') model = PureMMLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs) token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features if model.lm_head.weight.shape[0] != token_num: print(f'model.lm_head.weight.shape[0]: {model.lm_head.weight.shape[0]}; token_num: {token_num}') model.lm_head.weight = torch.nn.Parameter( torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype)) model.model.embed_tokens.weight = torch.nn.Parameter( torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype)) print('Loading additional PureMM weights...') if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')): non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu') non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()} if any(k.startswith('model.model.') for k in non_lora_trainables): non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()} incompatible_keys = model.load_state_dict(non_lora_trainables, strict=False) # print("non_lora_trainables incompatible_keys: ", incompatible_keys) # load vision_tower before the LoRA weights, to verify the effect of LoRA training on the visual encoder vision_tower = model.get_vision_tower() print(f'vision_tower.is_loaded: {vision_tower.is_loaded}') if not vision_tower.is_loaded: vision_tower.load_model() print(f'vision_tower loaded!!!!') # print(f'model: {model}') print('Loading LoRA weights...') model = PeftModel.from_pretrained(model, model_path) # print(f'model after get lora: {model}') print('Merging LoRA weights...') model = model.merge_and_unload() # print(f'model after merge with lora: {model}') print('Model is loaded...') vision_tower = model.get_vision_tower() print(f'vision_tower.is_loaded: {vision_tower.is_loaded}') if not vision_tower.is_loaded: vision_tower.load_model() print(f'vision_tower loaded!!!!') vision_tower.to(device=device, dtype=torch.float16) image_processor = vision_tower.image_processor print(f'image_processor: {image_processor}') if hasattr(model.config, "max_sequence_length"): context_len = model.config.max_sequence_length else: context_len = 2048 return tokenizer, model, image_processor, context_len def eval_model(args): # Model disable_torch_init() model_path = os.path.expanduser(args.model_path) model_name = get_model_name_from_path(model_path) print(f'model_name: {model_name}') tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) print('load model done!!!') # questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] questions_json = json.load(open(os.path.expanduser(args.question_file), "r")) image_dir = questions_json.get('root_dir', None) if 'mini_benchmark_IT_SFT_v1.2' in args.question_file: questions = questions_json.get('annotations') else: questions = questions_json.get('questions') # questions = get_chunk(questions, args.num_chunks, args.chunk_idx) if not os.path.exists(args.answers_dir): logging.error(f'answers_dir
does not exist: {args.answers_dir}') os.mkdir(args.answers_dir) print('answers_dir: ', args.answers_dir) answers_file = os.path.join(args.answers_dir, os.path.basename(args.question_file)) answers_file = answers_file.replace('.json', '_result.json') print('answers_file: ', answers_file) # answers_file = os.path.expanduser(args.answers_file) # os.makedirs(os.path.dirname(answers_file), exist_ok=True) ans_file = open(answers_file, "w") for line in tqdm(questions): idx = line["question_id"] image_file = line["image"] # qs = line["text"] qs = line["question"] gt = line['answer'] if 'mini_benchmark_IT_SFT_v1.2' in args.question_file: # qs = qs.replace('Please answer yes or no.', '') qs = qs.replace(' Please answer yes or no.', '\nAnswer the question using a single word or phrase.') cur_prompt = qs qs = DEFAULT_IMAGE_TOKEN + '\n' + qs conv = conv_templates[args.conv_mode].copy() conv.append_message(conv.roles[0], qs) conv.append_message(conv.roles[1], None) prompt = conv.get_prompt() input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() # image = Image.open(os.path.join(args.image_folder, image_file)) if image_dir: image_path = os.path.join(image_dir, image_file) else: image_path = os.path.join(args.image_folder, image_file) image = Image.open(image_path).convert('RGB') # image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
IMAGE_TOKEN_INDEX = -200 DEFAULT_IMAGE_TOKEN = "<image>" def split_list(lst, n): """Split a list into n (roughly) equal-sized chunks""" chunk_size = math.ceil(len(lst) / n) # integer division return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] def get_chunk(lst, n, k): chunks = split_list(lst, n) return chunks[k] def disable_torch_init(): """ Disable the redundant torch default initialization to accelerate model creation. """ setattr(torch.nn.Linear, "reset_parameters", lambda self: None) setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None) def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"): kwargs = {"device_map": device_map} if load_8bit: kwargs['load_in_8bit'] = True elif load_4bit: kwargs['load_in_4bit'] = True kwargs['quantization_config'] = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4' ) else: kwargs['torch_dtype'] = torch.float16 if 'lora' in model_name.lower() and model_base is None: warnings.warn( 'There is `lora` in model name but no `model_base` is provided. ') if 'lora' in model_name.lower() and model_base is not None: lora_cfg_pretrained = AutoConfig.from_pretrained(model_path) tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) print('Loading PureMM from base model...') model = PureMMLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs) token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features if model.lm_head.weight.shape[0] != token_num: print(f'model.lm_head.weight.shape[0]: {model.lm_head.weight.shape[0]}; token_num: {token_num}') model.lm_head.weight = torch.nn.Parameter( torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype)) model.model.embed_tokens.weight = torch.nn.Parameter( torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype)) print('Loading additional PureMM weights...') if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')): non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu') non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()} if any(k.startswith('model.model.') for k in non_lora_trainables): non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()} incompatible_keys = model.load_state_dict(non_lora_trainables, strict=False) # print("non_lora_trainables incompatible_keys: ", incompatible_keys) # load vision_tower before the LoRA weights, to verify the effect of LoRA training on the visual encoder vision_tower = model.get_vision_tower() print(f'vision_tower.is_loaded: {vision_tower.is_loaded}') if not vision_tower.is_loaded: vision_tower.load_model() print(f'vision_tower loaded!!!!') # print(f'model: {model}') print('Loading LoRA weights...') model = PeftModel.from_pretrained(model, model_path) # print(f'model after get lora: {model}') print('Merging LoRA weights...') model = model.merge_and_unload() # print(f'model after merge with lora: {model}') print('Model is loaded...') vision_tower = model.get_vision_tower() print(f'vision_tower.is_loaded: {vision_tower.is_loaded}') if not vision_tower.is_loaded: vision_tower.load_model() print(f'vision_tower loaded!!!!') vision_tower.to(device=device, dtype=torch.float16) image_processor = vision_tower.image_processor
print(f'image_processor: {image_processor}') if hasattr(model.config, "max_sequence_length"): context_len = model.config.max_sequence_length else: context_len = 2048 return tokenizer, model, image_processor, context_len def eval_model(args): # Model disable_torch_init() model_path = os.path.expanduser(args.model_path) model_name = get_model_name_from_path(model_path) print(f'model_name: {model_name}') tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) print('load model done!!!') # questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] questions_json = json.load(open(os.path.expanduser(args.question_file), "r")) image_dir = questions_json.get('root_dir', None) if 'mini_benchmark_IT_SFT_v1.2' in args.question_file: questions = questions_json.get('annotations') else: questions = questions_json.get('questions') # questions = get_chunk(questions, args.num_chunks, args.chunk_idx) if not os.path.exists(args.answers_dir): logging.error(f'answers_dir does not exist: {args.answers_dir}') os.mkdir(args.answers_dir) print('answers_dir: ', args.answers_dir) answers_file = os.path.join(args.answers_dir, os.path.basename(args.question_file)) answers_file = answers_file.replace('.json', '_result.json') print('answers_file: ', answers_file) # answers_file = os.path.expanduser(args.answers_file) # os.makedirs(os.path.dirname(answers_file), exist_ok=True) ans_file = open(answers_file, "w") for line in tqdm(questions): idx = line["question_id"] image_file = line["image"] # qs = line["text"] qs = line["question"] gt = line['answer'] if 'mini_benchmark_IT_SFT_v1.2' in args.question_file: # qs = qs.replace('Please answer yes or no.', '') qs = qs.replace(' Please answer yes or no.', '\nAnswer the question using a single word or phrase.') cur_prompt = qs qs = DEFAULT_IMAGE_TOKEN + '\n' + qs conv = conv_templates[args.conv_mode].copy() conv.append_message(conv.roles[0], qs) conv.append_message(conv.roles[1], None) prompt = conv.get_prompt() input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() # image = Image.open(os.path.join(args.image_folder, image_file)) if image_dir: image_path = os.path.join(image_dir, image_file) else: image_path = os.path.join(args.image_folder, image_file) image = Image.open(image_path).convert('RGB') # image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
image_tensor = process_images([image], image_processor, model.config)[0]
2
2023-12-27 09:54:09+00:00
4k
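The heart of tokenizer_image_token (shown in full in the record's context above) is to tokenize the prompt around each <image> marker and splice the sentinel index -200 between the chunks, so image embeddings can later be substituted at those positions. A toy reconstruction of that splicing follows; the word-level tokenizer is a stand-in for the real Hugging Face tokenizer, and the BOS-offset handling of the original is omitted for brevity.

IMAGE_TOKEN_INDEX = -200  # same sentinel value as in the record above
_vocab = {}

def toy_tokenize(text):
    # Stable word-level IDs; stands in for a Hugging Face tokenizer.
    return [_vocab.setdefault(tok, len(_vocab) + 1) for tok in text.split()]

def insert_image_tokens(prompt):
    chunks = [toy_tokenize(chunk) for chunk in prompt.split("<image>")]
    input_ids = list(chunks[0])
    for chunk in chunks[1:]:
        input_ids.append(IMAGE_TOKEN_INDEX)  # placeholder the vision features replace
        input_ids.extend(chunk)
    return input_ids

print(insert_image_tokens("<image>\ndescribe the image"))  # [-200, 1, 2, 3]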
giaminhgist/3D-DAM
lib/training/train.py
[ { "identifier": "AverageMeter", "path": "lib/utils/utils.py", "snippet": "class AverageMeter:\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "accuracy", "path": "lib/utils/utils.py", "snippet": "def accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n maxk = min(max(topk), output.size()[1])\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]" }, { "identifier": "EarlyStopping", "path": "lib/utils/EarlyStopping.py", "snippet": "class EarlyStopping:\n \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\n\n def __init__(self, patience=7, verbose=False, delta=0, path='checkpoint.pt', trace_func=print):\n \"\"\"\n Args:\n patience (int): How long to wait after last time validation loss improved.\n Default: 7\n verbose (bool): If True, prints a message for each validation loss improvement.\n Default: False\n delta (float): Minimum change in the monitored quantity to qualify as an improvement.\n Default: 0\n path (str): Path for the checkpoint to be saved to.\n Default: 'checkpoint.pt'\n trace_func (function): trace print function.\n Default: print\n \"\"\"\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n self.path = path\n self.trace_func = trace_func\n\n def __call__(self, val_loss, model):\n\n score = -val_loss\n\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n elif score < self.best_score + self.delta:\n self.counter += 1\n self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n self.counter = 0\n\n def save_checkpoint(self, val_loss, model):\n '''Saves model when validation loss decrease.'''\n if self.verbose:\n self.trace_func(\n f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')\n torch.save(model.state_dict(), self.path)\n self.val_loss_min = val_loss" }, { "identifier": "plot_result", "path": "lib/training/train_helper.py", "snippet": "def plot_result(title, val_list, train_list, type_data='Loss'):\n fig = plt.figure(figsize=(10, 10), dpi=500)\n plt.title(f'{title}')\n plt.plot(val_list, label=\"val\")\n plt.plot(train_list, label=\"train\")\n plt.xlabel(\"iterations\")\n plt.ylabel(f'{type_data}')\n if type_data == 'Loss':\n plt.ylim(0, 3)\n else:\n plt.ylim(40, 100)\n plt.legend()\n fig.savefig(f'{title}_{type_data}.png', bbox_inches='tight')\n plt.close(fig)" } ]
import numpy as np import torch import torch.nn.functional as F from torch import nn from collections import OrderedDict from lib.utils.utils import AverageMeter, accuracy from lib.utils.EarlyStopping import EarlyStopping from lib.training.train_helper import plot_result from sklearn.metrics import confusion_matrix from tqdm import tqdm
1,675
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def train_one_epoch( model, loader, optimizer, epoch_idx: int, lr_scheduler=None, ): losses_m = AverageMeter() acc_m = AverageMeter() model.train() print('Start training epoch: ', epoch_idx) for batch_idx, data in enumerate(tqdm(loader)): images, target = data images, target = images.to(device), target.to(device) target = target.flatten() output = model(images) loss = nn.CrossEntropyLoss()(output, target) losses_m.update(loss.item(), images.size(0)) acc1 = accuracy(output, target, topk=(1,)) acc_m.update(acc1[0].item(), output.size(0)) optimizer.zero_grad() loss.backward() optimizer.step() torch.cuda.synchronize() print(optimizer.param_groups[0]['lr']) if hasattr(optimizer, 'sync_lookahead'): optimizer.sync_lookahead() metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)]) if lr_scheduler is not None: lr_scheduler.step() return metrics def validate(model, loader): losses_m = AverageMeter() acc_m = AverageMeter() model.eval() with torch.no_grad(): for batch_idx, data in enumerate(loader): images, target = data images, target = images.to(device), target.to(device) target = target.flatten() output = model(images) loss = nn.CrossEntropyLoss()(output, target) acc1 = accuracy(output, target, topk=(1,)) # reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(loss.item(), images.size(0)) acc_m.update(acc1[0].item(), output.size(0)) metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)]) return metrics def train(model, train_loader, val_loader, epoch_size=300, lr_scheduler=True, learning_rate=1e-7, optimizer_setup='Adam', w_decay=1e-7, patience=20, save_last=True, name='save', fold=0, ): seed = 42 torch.manual_seed(seed) np.random.seed(seed) print('Training using:', device) model = torch.nn.DataParallel(model) model.to(device) if optimizer_setup == 'Adam': optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=w_decay) elif optimizer_setup == 'SGD': optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay) else: optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay) min_valid_loss = np.inf max_acc = 0 highest_val_epoch = 0 train_acc = [] train_losses = [] val_acc = [] val_losses = [] if lr_scheduler: print('Applied lr_scheduler') scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1) else: scheduler = None
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def train_one_epoch( model, loader, optimizer, epoch_idx: int, lr_scheduler=None, ): losses_m = AverageMeter() acc_m = AverageMeter() model.train() print('Start training epoch: ', epoch_idx) for batch_idx, data in enumerate(tqdm(loader)): images, target = data images, target = images.to(device), target.to(device) target = target.flatten() output = model(images) loss = nn.CrossEntropyLoss()(output, target) losses_m.update(loss.item(), images.size(0)) acc1 = accuracy(output, target, topk=(1,)) acc_m.update(acc1[0].item(), output.size(0)) optimizer.zero_grad() loss.backward() optimizer.step() torch.cuda.synchronize() print(optimizer.param_groups[0]['lr']) if hasattr(optimizer, 'sync_lookahead'): optimizer.sync_lookahead() metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)]) if lr_scheduler is not None: lr_scheduler.step() return metrics def validate(model, loader): losses_m = AverageMeter() acc_m = AverageMeter() model.eval() with torch.no_grad(): for batch_idx, data in enumerate(loader): images, target = data images, target = images.to(device), target.to(device) target = target.flatten() output = model(images) loss = nn.CrossEntropyLoss()(output, target) acc1 = accuracy(output, target, topk=(1,)) # reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(loss.item(), images.size(0)) acc_m.update(acc1[0].item(), output.size(0)) metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)]) return metrics def train(model, train_loader, val_loader, epoch_size=300, lr_scheduler=True, learning_rate=1e-7, optimizer_setup='Adam', w_decay=1e-7, patience=20, save_last=True, name='save', fold=0, ): seed = 42 torch.manual_seed(seed) np.random.seed(seed) print('Training using:', device) model = torch.nn.DataParallel(model) model.to(device) if optimizer_setup == 'Adam': optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=w_decay) elif optimizer_setup == 'SGD': optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay) else: optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay) min_valid_loss = np.inf max_acc = 0 highest_val_epoch = 0 train_acc = [] train_losses = [] val_acc = [] val_losses = [] if lr_scheduler: print('Applied lr_scheduler') scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1) else: scheduler = None
early_stopping = EarlyStopping(patience=patience, verbose=True)
2
2023-12-22 10:15:55+00:00
4k
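The training loop in the record above aggregates its epoch metrics with AverageMeter, which keeps a batch-size-weighted running mean rather than a naive average of per-batch values. A quick stand-alone usage sketch; the class body is copied from the record's context and the loss values are invented:

class AverageMeter:
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n  # weight each batch by its size n
        self.count += n
        self.avg = self.sum / self.count

loss_m = AverageMeter()
for batch_loss, batch_size in [(2.0, 32), (1.0, 32), (0.5, 16)]:
    loss_m.update(batch_loss, batch_size)

print(loss_m.avg)  # (2.0*32 + 1.0*32 + 0.5*16) / 80 = 1.3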
gardenifi/server
tests/api/discover_wifi_test.py
[ { "identifier": "discover_wifi", "path": "app/main_app.py", "snippet": "@app.get(\"/api/discover_wifi\")\nasync def discover_wifi(chunked: int = None, page: int = None):\n \"\"\"WIFI discovery API call.\"\"\"\n try:\n if chunked is not None:\n if page is None:\n return JSONResponse(status_code=200, content=services.discover_wifi_networks(chunked))\n return JSONResponse(status_code=200, content=services.discover_wifi_networks(chunked, page))\n return JSONResponse(status_code=200, content=services.discover_wifi_networks())\n except Exception as ex:\n raise HTTPException(status_code=500, detail=str(ex)) from ex" }, { "identifier": "Services", "path": "app/raspi/services.py", "snippet": "class Services:\n \"\"\"\n The `Services` class provides various methods for managing and controlling\n services related to a Raspberry Pi device, such as turning on/off valves,\n storing and deleting program cycles, loading program cycles, discovering\n WiFi networks, and saving WiFi network configurations.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self._scheduler = BackgroundScheduler()\n self._scheduler_started = False\n\n @property\n def scheduler_started(self):\n \"\"\"getter\"\"\"\n return self._scheduler_started\n\n @scheduler_started.setter\n def scheduler_started(self, value):\n \"\"\"setter\"\"\"\n self._scheduler_started = value\n\n @property\n def scheduler(self):\n \"\"\"getter\"\"\"\n return self._scheduler\n\n @scheduler.setter\n def scheduler(self, value):\n \"\"\"setter\"\"\"\n self._scheduler = value\n\n def turn_on_from_program(self, valve):\n \"\"\"\n Turn on a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(2, \"out\" + str(valve))\n\n def turn_off_from_program(self, valve):\n \"\"\"\n Turn off a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(0, \"out\" + str(valve))\n\n def get_stop_datetime(self, day, start_hour, start_min, period):\n \"\"\"\n Calculate the stop time for a program cycle.\n\n Parameters:\n - day (str): The day of the week.\n - start_hour (int): The starting hour.\n - start_min (int): The starting minute.\n - period (int): The duration of the cycle in minutes.\n\n Returns:\n tuple: A tuple containing the stop day, stop hour, and stop minute.\n \"\"\"\n logger.debug(f\"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}\")\n stop_day_index = DAYS.index(day)\n logger.debug(f\"stop_day_index {stop_day_index}\")\n\n stop_min = (start_min + period) % 60\n logger.debug(f\"stop_min {stop_min}\")\n\n if stop_min < start_min:\n # should go to the next hour\n stop_hour = (start_hour + 1) % 24\n # should go to the next day\n if stop_hour < start_hour:\n stop_day_index = (stop_day_index + 1) % 7\n else:\n stop_hour = start_hour\n\n logger.debug(f\"stop_hour {stop_hour}\")\n\n stop_day = DAYS[stop_day_index]\n logger.debug(f\"stop_day: {stop_day}\")\n\n return stop_day, stop_hour, stop_min\n\n def store_program_cycles(self, json_data, store=False) -> None:\n \"\"\"\n Store program cycles and schedule them using the scheduler.\n\n Parameters:\n - json_data (dict): JSON data containing program information.\n - store (bool, optional): Whether to store the program information. 
Default is False.\n\n Returns:\n None\n \"\"\"\n try:\n triggers_to_start = []\n triggers_to_stop = []\n for day in json_data[\"days\"].split(\",\"):\n if day not in DAYS:\n raise DayValueException(f\"{day} is not correct! Accepted values: {DAYS}\")\n for cycle in json_data[\"cycles\"]:\n logger.info(f\"Cycle: {cycle}\")\n if int(cycle[\"min\"]) <= 0:\n logger.info(\"This cycle should not be considered to be in the program due to min <=0.\")\n continue\n start_hour = cycle[\"start\"].split(\":\")[0]\n start_min = cycle[\"start\"].split(\":\")[1]\n\n logger.info(f\"Start: {day} at {start_hour}:{start_min}\")\n triggers_to_start.append(CronTrigger(day_of_week=day, hour=int(start_hour), minute=int(start_min)))\n\n stop_day, stop_hour, stop_min = self.get_stop_datetime(day, int(start_hour), int(start_min), int(cycle[\"min\"]))\n logger.info(f\"Stop: {stop_day} at {stop_hour}:{stop_min}\")\n triggers_to_stop.append(CronTrigger(day_of_week=stop_day, hour=stop_hour, minute=stop_min))\n\n logger.info(f\"FINAL Triggers To Start to be in the program:{triggers_to_start}\")\n logger.info(f\"FINAL Triggers To Stop to be in the program: {triggers_to_stop}\")\n\n self._scheduler.add_job(self.turn_on_from_program, OrTrigger(triggers_to_start), args=[json_data[\"out\"]])\n self._scheduler.add_job(self.turn_off_from_program, OrTrigger(triggers_to_stop), args=[json_data[\"out\"]])\n\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n\n if store is True:\n file_path = PROGRAM + str(json_data[\"out\"]) + PROGRAM_EXT\n with open(file_path, \"w\", encoding=\"utf-8\") as outfile:\n json.dump(json_data, outfile)\n outfile.close()\n\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def delete_program(self, valve) -> bool:\n \"\"\"\n Delete a stored program for a specific valve.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n bool: True if the program was deleted, False otherwise.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Looking for {file_path} to delete!\")\n if path.exists(file_path):\n logger.info(f\"{file_path} exists! 
Deleting it...\")\n remove(file_path)\n return True\n return False\n\n def load_program_cycles_if_exists(self, valve):\n \"\"\"\n Load program cycles for a valve if a stored program exists.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n dict or None: The loaded JSON data or None if no program exists.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Loading {file_path} if exists!\")\n json_data = None\n if path.exists(file_path):\n logger.info(f\"{file_path} exists!\")\n with open(file_path, encoding=\"utf-8\") as json_file:\n json_data = json.load(json_file)\n self.store_program_cycles(json_data)\n json_file.close()\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n return json_data\n\n def split_json_into_chunks(self, selected_page, ap_array):\n \"\"\"\n Split a JSON array into chunks and create a response JSON.\n\n Parameters:\n - selected_page (int): The requested page number.\n - ap_array (list): The array to be split.\n\n Returns:\n dict: The response JSON containing the specified page and network information.\n \"\"\"\n selected_page = int(selected_page)\n json_response = {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"page\": selected_page,\n \"nets\": {},\n \"pages\": 0,\n }\n json_response_to_send = json_response.copy()\n\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n logger.debug(f\"Initial JSON response headers size: {headers_size} bytes\")\n\n pages = 1\n current_chunk_size = headers_size\n json_array = []\n\n for item in ap_array:\n json_response[\"pages\"] = pages\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n item_size = len(json.dumps(item).encode(\"utf-8\"))\n logger.debug(\n \"JSON item size: \"\n + f\"{item_size} bytes, \"\n + \"current_chunk_size: \"\n + f\"{current_chunk_size} bytes, \"\n + \"total: \"\n + f\"{current_chunk_size + item_size} bytes\"\n )\n if current_chunk_size + item_size >= MAX_NUM_OF_BYTES_CHUNK - MAX_NUM_OF_BUFFER_TO_ADD:\n pages += 1\n json_response[\"pages\"] = pages\n json_array = [item]\n json_response[\"nets\"] = json_array\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n current_chunk_size = headers_size + item_size + len(\", \")\n logger.debug(\n f\"Found total >= {MAX_NUM_OF_BYTES_CHUNK}: \"\n f\"Creating a new page: {pages}. \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n else:\n json_array.append(item)\n current_chunk_size += item_size + len(\", \")\n if selected_page == pages:\n json_response_to_send[\"nets\"] = json_array\n\n json_response_to_send[\"pages\"] = pages\n logger.debug(f\"JSON response size: {headers_size}\")\n logger.debug(\n f\"Nets array for this page ({pages}): {json_array}. \"\n f\"Current nets array size: {len(json.dumps(json_array).encode('utf-8'))} bytes, \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n\n if not json_response[\"nets\"]:\n json_response_to_send[\"nets\"] = json_array\n\n logger.debug(f\"JSON total size: {len(json.dumps(json_response_to_send).encode('utf-8'))}\")\n return json_response_to_send\n\n def discover_wifi_networks(self, chunked=0, page=1, refresh_networks_file=False):\n \"\"\"\n Discover available WiFi networks and return the information.\n\n Parameters:\n - chunked (int, optional): Whether to split the response into chunks. Default is 0.\n - page (int, optional): The requested page number. 
Default is 1.\n - refresh_networks_file (bool, optional): Whether to refresh the networks file. Default is False.\n\n Returns:\n str or dict: The JSON response containing WiFi network information.\n \"\"\"\n try:\n if page > 1:\n refresh_networks_file = False\n json_response = {}\n ap_array = []\n retries = 0\n while retries < 30:\n retries = retries + 1\n ap_array = Helpers().scan_rpi_wifi_networks(refresh_networks_file)\n if len(ap_array) != 0:\n break\n\n json_response = json.dumps(\n {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"ap_array\": ap_array,\n }\n )\n\n logger.info(f\"json_response: {json_response}\")\n if chunked == 0:\n return json_response\n logger.info(f\"Split array into chunks of {MAX_NUM_OF_BYTES_CHUNK} bytes...\")\n json_response = self.split_json_into_chunks(page, ap_array)\n return json_response\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network(self, ssid, wifi_key):\n \"\"\"\n Save WiFi network information.\n\n Parameters:\n - request_data (dict): The request data containing WiFi network information.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n if ssid and wifi_key:\n Helpers().store_wpa_ssid_key(ssid, wifi_key)\n return \"OK\"\n raise ValueError(\"Error: You need to provide ssid and wifi_keys in POST data\")\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network_with_wpa(self, wpa_enabled, wpa_key):\n \"\"\"\n Save WiFi network information with WPA settings.\n\n Parameters:\n - request_params (dict): The request parameters containing WPA settings.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n logger.info(f\"wpa_enabled: {wpa_enabled}, wpa_key: {wpa_key}\")\n if str(wpa_enabled) == \"1\":\n Helpers().update_wpa_supplicant(1, wpa_key)\n else:\n Helpers().update_wpa_supplicant(0, wpa_key)\n\n thread = Thread(target=Helpers().sleep_and_reboot_for_wpa)\n thread.start()\n return \"OK\"\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise" } ]
import json import pytest from app.main_app import discover_wifi from app.raspi.services import Services
3,565
"""MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """
"""MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """
services = Services()
1
2023-12-22 08:06:09+00:00
4k
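The `split_json_into_chunks` method in the Services record above paginates WiFi scan results so that each serialized response stays under a byte budget (`MAX_NUM_OF_BYTES_CHUNK` minus a safety buffer), re-measuring the JSON envelope as items are added. Below is a minimal, self-contained sketch of that byte-budget pagination; `paginate_by_bytes` and its budget value are illustrative, not the project's constants, and the header fields the real method carries on every page are omitted.

```python
import json

def paginate_by_bytes(items, max_bytes=512):
    """Split `items` into pages whose JSON encoding stays under max_bytes."""
    pages, current, size = [], [], 2          # 2 bytes for the enclosing "[]"
    for item in items:
        # +2 approximates the ", " separator between serialized items
        item_size = len(json.dumps(item).encode("utf-8")) + 2
        if current and size + item_size > max_bytes:
            pages.append(current)             # budget exceeded: start a new page
            current, size = [], 2
        current.append(item)
        size += item_size
    if current:
        pages.append(current)
    return pages

# Ten fake networks split into byte-bounded pages.
nets = [{"ssid": f"net-{i}", "rssi": -40 - i} for i in range(10)]
for page_no, page in enumerate(paginate_by_bytes(nets, max_bytes=128), start=1):
    print(page_no, len(json.dumps(page).encode("utf-8")), "bytes")
```

The real method has to re-serialize the whole envelope on each iteration because `hw_id`, the MQTT broker block, and the page counter all count against the same chunk limit.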
xiaoye0x0/pfgo_tg_bot
apps/all_status/view.py
[ { "identifier": "str_in_list_str", "path": "utils/base.py", "snippet": "def str_in_list_str(target: str, target_list: list) -> bool:\n \"\"\"检测字符串中是否存在list中字符串\"\"\"\n for target_str in target_list:\n if target_str in target:\n return True\n return False" }, { "identifier": "PfgoSpider", "path": "utils/pfgo_spider/spider.py", "snippet": "class PfgoSpider:\n def __init__(self, url: str, username: str, password: str) -> None:\n self.logger = Logmanager.create_logger(\"PfgoSpider\")\n self.url = url\n self.username = username\n self.password: str = urllib.parse.quote(password, safe=\"\")\n self.cookie: str = \"\"\n\n def login(self) -> str:\n try:\n url = f\"{self.url}/ajax/login\"\n headers = {\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cache-Control\": \"no-cache\",\n \"DNT\": \"1\",\n \"Pragma\": \"no-cache\",\n \"Referer\": f\"{self.url}/login\",\n \"Sec-Ch-Ua\": '\"Microsoft Edge\";v=\"105\", \"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"105\"',\n \"Sec-Ch-Ua-Mobile\": \"?0\",\n \"Sec-Ch-Ua-Platform\": '\"Windows\"',\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36 Edg/105.0.1343.42\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n data = {\"username\": self.username, \"password\": self.password}\n resp = requests.post(url, headers=headers, data=data)\n if not resp.text == '{\"Ok\":true}':\n self.logger.info(\"登录失败: 账户密码错误\")\n return \"登录失败: 账户密码错误\"\n self.cookie = (resp.headers[\"set-cookie\"]).split(\";\")[0]\n self.logger.info(\"登录成功\")\n return \"\"\n except Exception as e:\n self.logger.info(f\"登录失败: {e}\")\n return f\"登录失败: {e}\"\n\n def get_forward_rules(self):\n self.login()\n rules = self._get_forward_rules()\n statistics = self._get_statistics()\n return self._rules_statistics_summary(rules, statistics)\n\n def _get_forward_rules(self) -> list:\n url = f\"{self.url}/ajax/forward_rule\"\n headers = {\n \"accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"accept-language\": \"zh-CN,zh;q=0.9\",\n \"authority\": self.url.split(\"//\")[-1],\n \"cache-control\": \"no-cache\",\n \"cookie\": self.cookie,\n \"dnt\": \"1\",\n \"pragma\": \"no-cache\",\n \"referer\": f\"{self.url}/forward_rules\",\n \"sec-ch-ua\": '\"Microsoft Edge\";v=\"105\", \"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"105\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36 Edg/105.0.1343.42\",\n \"x-requested-with\": \"XMLHttpRequest\",\n }\n resp = requests.get(url, headers=headers)\n resp_json = resp.json()\n return resp_json[\"Data\"]\n\n def _get_statistics(self):\n url = f\"{self.url}/ajax/forward_rule/statistics\"\n headers = {\n \"authority\": self.url.split(\"//\")[-1],\n \"accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"accept-language\": \"zh-CN,zh;q=0.9\",\n \"cache-control\": \"no-cache\",\n \"cookie\": self.cookie,\n \"dnt\": \"1\",\n \"pragma\": \"no-cache\",\n \"referer\": f\"{self.url}/forward_rules\",\n \"sec-ch-ua\": '\"Microsoft Edge\";v=\"105\", \"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"105\"',\n 
\"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36 Edg/105.0.1343.42\",\n \"x-requested-with\": \"XMLHttpRequest\",\n }\n resp = requests.get(url, headers=headers)\n resp_json = resp.json()\n return resp_json[\"Data\"]\n\n def _rules_statistics_summary(self, rules: list, statistics: list):\n total = {}\n for rule in rules:\n total[rule[\"id\"]] = {\"name\": rule[\"name\"]}\n\n for one_statistics in statistics:\n if one_statistics[\"rule_id\"] in total:\n if \"traffic\" in total[one_statistics[\"rule_id\"]]:\n total[one_statistics[\"rule_id\"]][\"traffic\"] += one_statistics[\n \"traffic\"\n ]\n else:\n total[one_statistics[\"rule_id\"]][\"traffic\"] = one_statistics[\n \"traffic\"\n ]\n # 将字节转换为G / 1073741824\n for k, v in total.items():\n total[k][\"traffic\"] = round(total[k][\"traffic\"] / 1073741824, 2)\n return total" }, { "identifier": "Task", "path": "utils/task/model.py", "snippet": "class Task(metaclass=SingletonMeta):\n def __init__(self, args) -> None:\n self.conf_file = args.config\n\n self.bot_token: str = \"\"\n\n self.pfgo_url: str = \"\"\n self.username: str = \"\"\n self.password: str = \"\"\n self.hide: list = []\n\n self.webhook_url = \"\"\n self.webhook_port = \"\"\n self.running_host = \"\"\n self.running_port = 0\n\n self._init_conf()\n\n def _init_conf(self):\n config = configparser.ConfigParser()\n config.read(self.conf_file)\n self.bot_token = config.get(\"bot\", \"token\")\n\n self.pfgo_url = config.get(\"pfgo\", \"url\")\n self.username = config.get(\"pfgo\", \"username\")\n self.password = config.get(\"pfgo\", \"password\")\n self.hide += config.get(\"pfgo\", \"hide\").split(\",\")\n\n self.webhook_url = config.get(\"webhook\", \"webhook_url\")\n self.webhook_port = config.get(\"webhook\", \"webhook_port\")\n self.running_host = config.get(\"webhook\", \"running_host\")\n self.running_port = int(config.get(\"webhook\", \"running_port\"))" }, { "identifier": "is_administrator", "path": "utils/tg_tools/base.py", "snippet": "def is_administrator(id: int, members: List[ChatMember]) -> bool:\n for member in members:\n if member.user.id == id:\n return True\n return False" } ]
from telebot import TeleBot from telebot.types import Message from utils.base import str_in_list_str from utils.pfgo_spider.spider import PfgoSpider from utils.task import Task from utils.tg_tools.base import is_administrator
2,021
def get_all_status(bot: TeleBot, message: Message): chat_id = message.chat.id task = Task() bot_info = bot.get_me() if is_administrator(bot_info.id, bot.get_chat_administrators(chat_id)): sent_message = bot.send_message(chat_id, "开始查询数据") s = PfgoSpider(task.pfgo_url, task.username, task.password) try: data = s.get_forward_rules() result_text = "" for _, v in data.items():
def get_all_status(bot: TeleBot, message: Message): chat_id = message.chat.id task = Task() bot_info = bot.get_me() if is_administrator(bot_info.id, bot.get_chat_administrators(chat_id)): sent_message = bot.send_message(chat_id, "开始查询数据") s = PfgoSpider(task.pfgo_url, task.username, task.password) try: data = s.get_forward_rules() result_text = "" for _, v in data.items():
if str_in_list_str(v["name"], task.hide):
0
2023-12-28 08:55:04+00:00
4k
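In the pfgo record above, `_rules_statistics_summary` joins forwarding rules to their traffic statistics by `rule_id`, then converts the accumulated byte counts with `traffic / 1073741824` (1024**3, i.e. GiB) rounded to two decimals. A self-contained sketch of that aggregation follows, with in-memory dicts standing in for the two API responses; the rule names and traffic figures are invented, and traffic is initialized to 0 up front where the original creates the key lazily.

```python
def bytes_to_gib(n: int) -> float:
    """Raw byte count -> GiB, rounded to 2 decimals (1 GiB = 1073741824 bytes)."""
    return round(n / 1073741824, 2)

rules = [{"id": 1, "name": "hk-01"}, {"id": 2, "name": "jp-02"}]
statistics = [
    {"rule_id": 1, "traffic": 3_221_225_472},  # 3 GiB
    {"rule_id": 1, "traffic": 1_073_741_824},  # +1 GiB
    {"rule_id": 2, "traffic": 536_870_912},    # 0.5 GiB
]

# Sum traffic per rule, keyed the same way the spider keys its `total` dict.
total = {r["id"]: {"name": r["name"], "traffic": 0} for r in rules}
for s in statistics:
    if s["rule_id"] in total:
        total[s["rule_id"]]["traffic"] += s["traffic"]
for v in total.values():
    v["traffic"] = bytes_to_gib(v["traffic"])

print(total)  # {1: {'name': 'hk-01', 'traffic': 4.0}, 2: {'name': 'jp-02', 'traffic': 0.5}}
```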
bclavie/RAGatouille
ragatouille/RAGPretrainedModel.py
[ { "identifier": "CorpusProcessor", "path": "ragatouille/data/corpus_processor.py", "snippet": "class CorpusProcessor:\n def __init__(\n self,\n document_splitter_fn: Optional[Callable] = llama_index_sentence_splitter,\n preprocessing_fn: Optional[Union[Callable, list[Callable]]] = None,\n ):\n self.document_splitter_fn = document_splitter_fn\n self.preprocessing_fn = preprocessing_fn\n\n def process_corpus(\n self,\n documents: list[str],\n **splitter_kwargs,\n ) -> list[str]:\n # TODO CHECK KWARGS\n if self.document_splitter_fn is not None:\n documents = self.document_splitter_fn(documents, **splitter_kwargs)\n if self.preprocessing_fn is not None:\n if isinstance(self.preprocessing_fn, list):\n for fn in self.preprocessing_fn:\n documents = fn(documents)\n return documents\n return self.preprocessing_fn(documents)\n return documents" }, { "identifier": "llama_index_sentence_splitter", "path": "ragatouille/data/preprocessors.py", "snippet": "def llama_index_sentence_splitter(documents: list[str], chunk_size=256):\n chunk_overlap = min(chunk_size / 4, min(chunk_size / 2, 64))\n chunks = []\n node_parser = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n docs = [[Document(text=doc)] for doc in documents]\n for doc in docs:\n chunks += [node.text for node in node_parser(doc)]\n return chunks" }, { "identifier": "LateInteractionModel", "path": "ragatouille/models/base.py", "snippet": "class LateInteractionModel(ABC):\n @abstractmethod\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu,\n ):\n ...\n\n @abstractmethod\n def train():\n ...\n\n @abstractmethod\n def index(self, name: str, collection: list[str]):\n ...\n\n @abstractmethod\n def add_to_index(self):\n ...\n\n @abstractmethod\n def search(self, name: str, query: Union[str, list[str]]):\n ...\n\n @abstractmethod\n def _search(self, name: str, query: str):\n ...\n\n @abstractmethod\n def _batch_search(self, name: str, queries: list[str]):\n ..." 
}, { "identifier": "ColBERT", "path": "ragatouille/models/colbert.py", "snippet": "class ColBERT(LateInteractionModel):\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu: int = -1,\n index_name: Optional[str] = None,\n verbose: int = 1,\n load_from_index: bool = False,\n **kwargs,\n ):\n self.verbose = verbose\n self.collection = None\n if n_gpu == -1:\n n_gpu = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()\n\n if load_from_index:\n ckpt_config = ColBERTConfig.load_from_index(\n str(pretrained_model_name_or_path)\n )\n self.config = ckpt_config\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=self.config.experiment, root=self.config.root\n )\n self.checkpoint = self.config.checkpoint\n self.index_name = self.config.index_name\n self.collection = self._get_collection_from_file(\n str(pretrained_model_name_or_path / \"collection.json\")\n )\n else:\n ckpt_config = ColBERTConfig.load_from_checkpoint(\n str(pretrained_model_name_or_path)\n )\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=\"colbert\", root=\".ragatouille/\"\n )\n local_config = ColBERTConfig(**kwargs)\n self.config = ColBERTConfig.from_existing(\n ckpt_config,\n local_config,\n )\n self.checkpoint = pretrained_model_name_or_path\n self.index_name = index_name\n\n self.run_context = Run().context(self.run_config)\n self.run_context.__enter__() # Manually enter the context\n self.searcher = None\n\n def _update_index(self, new_documents: list[str], searcher: Searcher):\n updater = IndexUpdater(\n config=self.config, searcher=searcher, checkpoint=self.checkpoint\n )\n updater.add(new_documents)\n updater.persist_to_disk()\n\n def _get_collection_from_file(self, collection_path: str):\n return srsly.read_json(collection_path)\n\n def _write_collection_to_file(self, collection, collection_path: str):\n srsly.write_json(collection_path, collection)\n\n def add_to_index(\n self,\n new_documents: list[str],\n index_name: Optional[str] = None,\n ):\n self.index_name = index_name if index_name is not None else self.index_name\n if self.index_name is None:\n print(\n \"Cannot add to index without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n\n print(\n \"WARNING: add_to_index support is currently experimental!\",\n \"add_to_index support will be more thorough in future versions\",\n )\n\n searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n verbose=self.verbose,\n )\n new_documents = list(set(new_documents))\n current_len = len(searcher.collection)\n new_doc_len = len(new_documents)\n\n if (\n current_len + new_doc_len < 5000\n or new_doc_len > current_len * 0.05\n or current_len + new_doc_len\n > 100 # Export bug handler -- TODO: Remove this requirement\n ):\n new_documents += [x for x in searcher.collection]\n self.index(\n new_documents,\n index_name=self.index_name,\n max_document_length=self.config.doc_maxlen,\n overwrite=\"force_silent_overwrite\",\n )\n else:\n self._update_index(new_documents, searcher)\n\n print(\n f\"Successfully updated index with {new_doc_len} new documents!\\n\",\n f\"New index size: {new_doc_len + current_len}\",\n )\n\n return str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n\n def index(\n self,\n collection: list[str],\n index_name: Optional[\"str\"] = None,\n max_document_length: int = 256,\n overwrite: Union[bool, str] = \"reuse\",\n ):\n self.config.doc_maxlen = max_document_length\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n f\"No index_name received!\",\n f\"Using default index_name ({self.checkpoint}_new_index)\",\n )\n self.index_name = self.checkpoint + \"new_index\"\n\n collection = list(set(collection))\n self.collection = collection\n\n nbits = 2\n if len(collection) < 5000:\n nbits = 8\n elif len(collection) < 10000:\n nbits = 4\n self.config = ColBERTConfig.from_existing(\n self.config, ColBERTConfig(nbits=nbits)\n )\n self.indexer = Indexer(\n checkpoint=self.checkpoint,\n config=self.config,\n verbose=self.verbose,\n )\n self.indexer.index(\n name=self.index_name, collection=collection, overwrite=overwrite\n )\n\n index_path = str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n self._write_collection_to_file(collection, index_path + \"/collection.json\")\n print(\"Done indexing!\")\n\n def _load_searcher(\n self,\n index_name: Optional[str],\n force_fast: bool = False,\n ):\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n \"Cannot search without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n print(\n f\"Loading searcher for index {self.index_name} for the first time...\",\n \"This may take a few seconds\",\n )\n self.searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n )\n\n if not force_fast:\n if len(self.searcher.collection) < 10000:\n self.searcher.configure(ncells=4)\n self.searcher.configure(centroid_score_threshold=0.4)\n self.searcher.configure(ndocs=512)\n elif len(self.searcher.collection) < 100000:\n self.searcher.configure(ncells=2)\n self.searcher.configure(centroid_score_threshold=0.45)\n self.searcher.configure(ndocs=1024)\n # Otherwise, use defaults for k\n else:\n # Use fast settingss\n self.searcher.configure(ncells=1)\n self.searcher.configure(centroid_score_threshold=0.5)\n self.searcher.configure(ndocs=256)\n\n print(\"Searcher loaded!\")\n\n def search(\n self,\n query: Union[str, list[str]],\n index_name: Optional[\"str\"] = None,\n k: int = 10,\n force_fast: bool = False,\n zero_index_ranks: bool = False,\n ):\n if self.searcher is None or (\n index_name is not None and self.index_name != index_name\n ):\n self._load_searcher(index_name=index_name, force_fast=force_fast)\n\n if isinstance(query, str):\n results = [self._search(query, k)]\n else:\n results = self._batch_search(query, k)\n\n to_return = []\n\n for result in results:\n result_for_query = []\n for id_, rank, score in zip(*result):\n result_for_query.append(\n {\n \"content\": self.searcher.collection[id_],\n \"score\": score,\n \"rank\": rank - 1 if zero_index_ranks else rank,\n }\n )\n to_return.append(result_for_query)\n\n if len(to_return) == 1:\n return to_return[0]\n return to_return\n\n def _search(self, query: str, k: int):\n return self.searcher.search(query, k=k)\n\n def _batch_search(self, query: list[str], k: int):\n queries = {i: x for i, x in enumerate(query)}\n results = self.searcher.search_all(queries, k=k)\n results = [\n [list(zip(*value))[i] for i in range(3)]\n for value in results.todict().values()\n ]\n return results\n\n def train(self, data_dir, training_config: ColBERTConfig):\n training_config = ColBERTConfig.from_existing(self.config, training_config)\n training_config.nway = 2\n with Run().context(self.run_config):\n trainer = Trainer(\n triples=str(data_dir / \"triples.train.colbert.jsonl\"),\n queries=str(data_dir / \"queries.train.colbert.tsv\"),\n collection=str(data_dir / \"corpus.train.colbert.tsv\"),\n config=training_config,\n )\n\n trainer.train(checkpoint=self.checkpoint)\n\n def __del__(self):\n # Clean up context\n self.run_context.__exit__(None, None, None)" } ]
from typing import Callable, Optional, Union, List, Any from pathlib import Path from langchain_core.retrievers import BaseRetriever from langchain_core.documents import Document from langchain_core.callbacks.manager import ( CallbackManagerForRetrieverRun, ) from ragatouille.data.corpus_processor import CorpusProcessor from ragatouille.data.preprocessors import llama_index_sentence_splitter from ragatouille.models import LateInteractionModel, ColBERT
3,181
class RAGPretrainedModel: """ Wrapper class for a pretrained RAG late-interaction model, and all the associated utilities. Allows you to load a pretrained model from disk or from the hub, build or query an index. ## Usage Load a pre-trained checkpoint: ```python from ragatouille import RAGPretrainedModel RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0") ``` Load checkpoint from an existing index: ```python from ragatouille import RAGPretrainedModel RAG = RAGPretrainedModel.from_index("path/to/my/index") ``` Both methods will load a fully initialised instance of ColBERT, which you can use to build and query indexes. ```python RAG.search("How many people live in France?") ``` """ model_name: Union[str, None] = None model: Union[LateInteractionModel, None] = None
class RAGPretrainedModel: """ Wrapper class for a pretrained RAG late-interaction model, and all the associated utilities. Allows you to load a pretrained model from disk or from the hub, build or query an index. ## Usage Load a pre-trained checkpoint: ```python from ragatouille import RAGPretrainedModel RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0") ``` Load checkpoint from an existing index: ```python from ragatouille import RAGPretrainedModel RAG = RAGPretrainedModel.from_index("path/to/my/index") ``` Both methods will load a fully initialised instance of ColBERT, which you can use to build and query indexes. ```python RAG.search("How many people live in France?") ``` """ model_name: Union[str, None] = None model: Union[LateInteractionModel, None] = None
corpus_processor: Optional[CorpusProcessor] = None
0
2023-12-29 16:26:42+00:00
4k
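In the ColBERT wrapper above, `index()` picks the quantization width from corpus size: 8 bits per dimension under 5,000 documents, 4 under 10,000, and 2 otherwise, spending more bits where the index is small and cheap anyway. The heuristic in isolation (`choose_nbits` is my name for the extracted logic, not the library's API):

```python
def choose_nbits(collection_size: int) -> int:
    """Residual compression width used when building the index:
    small corpora can afford more bits per dimension."""
    if collection_size < 5000:
        return 8
    if collection_size < 10000:
        return 4
    return 2

assert choose_nbits(1_000) == 8
assert choose_nbits(7_500) == 4
assert choose_nbits(50_000) == 2
```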
Caipengzhou/BRAU-Netplusplus
networks/biformer.py
[ { "identifier": "Attention", "path": "networks/_common.py", "snippet": "class Attention(nn.Module):\n \"\"\"\n vanilla attention\n \"\"\"\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n \"\"\"\n args:\n x: NHWC tensor\n return:\n NHWC tensor\n \"\"\"\n _, H, W, _ = x.size()\n x = rearrange(x, 'n h w c -> n (h w) c')\n \n #######################################\n B, N, C = x.shape \n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n #######################################\n\n x = rearrange(x, 'n (h w) c -> n h w c', h=H, w=W)\n return x" }, { "identifier": "AttentionLePE", "path": "networks/_common.py", "snippet": "class AttentionLePE(nn.Module):\n \"\"\"\n vanilla attention\n \"\"\"\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., side_dwconv=5):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n self.lepe = nn.Conv2d(dim, dim, kernel_size=side_dwconv, stride=1, padding=side_dwconv//2, groups=dim) if side_dwconv > 0 else \\\n lambda x: torch.zeros_like(x)\n\n def forward(self, x):\n \"\"\"\n args:\n x: NHWC tensor\n return:\n NHWC tensor\n \"\"\"\n _, H, W, _ = x.size()\n x = rearrange(x, 'n h w c -> n (h w) c')\n \n #######################################\n B, N, C = x.shape \n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n lepe = self.lepe(rearrange(x, 'n (h w) c -> n c h w', h=H, w=W))\n lepe = rearrange(lepe, 'n c h w -> n (h w) c')\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = x + lepe\n\n x = self.proj(x)\n x = self.proj_drop(x)\n #######################################\n\n x = rearrange(x, 'n (h w) c -> n h w c', h=H, w=W)\n return x" }, { "identifier": "DWConv", "path": "networks/_common.py", "snippet": "class DWConv(nn.Module):\n def __init__(self, dim=768):\n super(DWConv, self).__init__()\n self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)\n\n def forward(self, x):\n \"\"\"\n x: NHWC tensor\n \"\"\"\n x = x.permute(0, 3, 1, 2) #NCHW\n x = self.dwconv(x)\n x = x.permute(0, 2, 3, 1) #NHWC\n\n return x" } ]
import torch import torch.nn as nn from einops import rearrange from timm.models.layers import DropPath from bra import BiLevelRoutingAttention from ._common import Attention, AttentionLePE, DWConv
1,738
""" BiFormer impl. author: ZHU Lei github: https://github.com/rayleizhu email: [email protected] This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class Block(nn.Module): def __init__(self, dim, input_resolution, drop_path=0., layer_scale_init_value=-1,num_heads=8, n_win=7, qk_dim=None, qk_scale=None, kv_per_win=4, kv_downsample_ratio=4, kv_downsample_kernel=None, kv_downsample_mode='ada_avgpool', topk=4, param_attention="qkvo", param_routing=False, diff_routing=False, soft_routing=False, mlp_ratio=4, mlp_dwconv=False, side_dwconv=5, before_attn_dwconv=3, pre_norm=True, auto_pad=False): super().__init__() qk_dim = qk_dim or dim self.input_resolution=input_resolution # modules if before_attn_dwconv > 0: self.pos_embed = nn.Conv2d(dim, dim, kernel_size=before_attn_dwconv, padding=1, groups=dim) else: self.pos_embed = lambda x: 0 self.norm1 = nn.LayerNorm(dim, eps=1e-6) # important to avoid attention collapsing if topk > 0: self.attn = BiLevelRoutingAttention(dim=dim, num_heads=num_heads, n_win=n_win, qk_dim=qk_dim, qk_scale=qk_scale, kv_per_win=kv_per_win, kv_downsample_ratio=kv_downsample_ratio, kv_downsample_kernel=kv_downsample_kernel, kv_downsample_mode=kv_downsample_mode, topk=topk, param_attention=param_attention, param_routing=param_routing, diff_routing=diff_routing, soft_routing=soft_routing, side_dwconv=side_dwconv, auto_pad=auto_pad) elif topk == -1:
""" BiFormer impl. author: ZHU Lei github: https://github.com/rayleizhu email: [email protected] This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class Block(nn.Module): def __init__(self, dim, input_resolution, drop_path=0., layer_scale_init_value=-1,num_heads=8, n_win=7, qk_dim=None, qk_scale=None, kv_per_win=4, kv_downsample_ratio=4, kv_downsample_kernel=None, kv_downsample_mode='ada_avgpool', topk=4, param_attention="qkvo", param_routing=False, diff_routing=False, soft_routing=False, mlp_ratio=4, mlp_dwconv=False, side_dwconv=5, before_attn_dwconv=3, pre_norm=True, auto_pad=False): super().__init__() qk_dim = qk_dim or dim self.input_resolution=input_resolution # modules if before_attn_dwconv > 0: self.pos_embed = nn.Conv2d(dim, dim, kernel_size=before_attn_dwconv, padding=1, groups=dim) else: self.pos_embed = lambda x: 0 self.norm1 = nn.LayerNorm(dim, eps=1e-6) # important to avoid attention collapsing if topk > 0: self.attn = BiLevelRoutingAttention(dim=dim, num_heads=num_heads, n_win=n_win, qk_dim=qk_dim, qk_scale=qk_scale, kv_per_win=kv_per_win, kv_downsample_ratio=kv_downsample_ratio, kv_downsample_kernel=kv_downsample_kernel, kv_downsample_mode=kv_downsample_mode, topk=topk, param_attention=param_attention, param_routing=param_routing, diff_routing=diff_routing, soft_routing=soft_routing, side_dwconv=side_dwconv, auto_pad=auto_pad) elif topk == -1:
self.attn = Attention(dim=dim)
0
2023-12-29 05:45:26+00:00
4k
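The vanilla `Attention` module in the BiFormer context computes q, k, and v from a single `nn.Linear(dim, dim * 3)` projection and scales attention scores by `head_dim ** -0.5`. Here is a shape walk-through of that reshape/permute sequence on random tensors (requires torch; the sizes are illustrative, not taken from the model config):

```python
import torch

B, N, C, num_heads = 2, 196, 64, 8
head_dim = C // num_heads
scale = head_dim ** -0.5                      # 1/sqrt(d_k), standard attention scaling

qkv = torch.randn(B, N, 3 * C)                # stands in for self.qkv(x)
qkv = qkv.reshape(B, N, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]              # each (B, num_heads, N, head_dim)

attn = (q @ k.transpose(-2, -1)) * scale      # (B, num_heads, N, N)
attn = attn.softmax(dim=-1)
out = (attn @ v).transpose(1, 2).reshape(B, N, C)  # back to (B, N, C)
print(out.shape)                              # torch.Size([2, 196, 64])
```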
shibing624/chatgpt-webui
src/overwrites.py
[ { "identifier": "chuanhu_path", "path": "src/presets.py", "snippet": "class I18nAuto:\n def __init__(self):\n def __call__(self, key):\nCHATGLM_MODEL = None\nCHATGLM_TOKENIZER = None\nLLAMA_MODEL = None\nLLAMA_INFERENCER = None\nINITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"\nAPI_HOST = \"api.openai.com\"\nOPENAI_API_BASE = \"https://api.openai.com/v1\"\nCHAT_COMPLETION_URL = \"https://api.openai.com/v1/chat/completions\"\nIMAGES_COMPLETION_URL = \"https://api.openai.com/v1/images/generations\"\nCOMPLETION_URL = \"https://api.openai.com/v1/completions\"\nBALANCE_API_URL = \"https://api.openai.com/dashboard/billing/credit_grants\"\nUSAGE_API_URL = \"https://api.openai.com/dashboard/billing/usage\"\nHISTORY_DIR = os.path.join(pwd_path, '../history')\nTEMPLATES_DIR = os.path.join(pwd_path, '../templates')\nSTANDARD_ERROR_MSG = i18n(\"☹️发生了错误:\") # 错误信息的标准前缀\nGENERAL_ERROR_MSG = i18n(\"获取对话时发生错误,请查看后台日志\")\nERROR_RETRIEVE_MSG = i18n(\"请检查网络连接,或者API-Key是否有效。\")\nCONNECTION_TIMEOUT_MSG = i18n(\"连接超时,无法获取对话。\") # 连接超时\nREAD_TIMEOUT_MSG = i18n(\"读取超时,无法获取对话。\") # 读取超时\nPROXY_ERROR_MSG = i18n(\"代理错误,无法获取对话。\") # 代理错误\nSSL_ERROR_PROMPT = i18n(\"SSL错误,无法获取对话。\") # SSL 错误\nNO_APIKEY_MSG = i18n(\"API key为空,请检查是否输入正确。\") # API key 长度不足 51 位\nNO_INPUT_MSG = i18n(\"请输入对话内容。\") # 未输入对话内容\nBILLING_NOT_APPLICABLE_MSG = i18n(\"账单信息不适用\") # 本地运行的模型返回的账单信息\nTIMEOUT_STREAMING = 60 # 流式对话时的超时时间\nTIMEOUT_ALL = 200 # 非流式对话时的超时时间\nENABLE_STREAMING_OPTION = True # 是否启用选择选择是否实时显示回答的勾选框\nHIDE_MY_KEY = True # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True\nCONCURRENT_COUNT = 100 # 允许同时使用的用户数量\nSIM_K = 5\nINDEX_QUERY_TEMPRATURE = 1.0\nCHUANHU_TITLE = i18n(\"ChatGPT 🚀\")\nCHUANHU_DESCRIPTION = i18n(\"GitHub: [shibing624/chatgpt-webui](https://github.com/shibing624/chatgpt-webui)\")\nONLINE_MODELS = [\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n \"gpt-3.5-turbo-0301\",\n \"gpt-3.5-turbo-0613\",\n \"gpt-3.5-turbo-1106\",\n \"gpt-4\",\n \"gpt-4-32k\",\n \"gpt-4-1106-preview\",\n \"gpt-4-vision-preview\",\n]\nMODEL_TOKEN_LIMIT = {\n \"gpt-3.5-turbo\": 4096,\n \"gpt-3.5-turbo-16k\": 16384,\n \"gpt-3.5-turbo-0301\": 4096,\n \"gpt-3.5-turbo-0613\": 4096,\n \"gpt-3.5-turbo-1106\": 16384,\n \"gpt-4\": 8192,\n \"gpt-4-32k\": 32768,\n \"gpt-4-1106-preview\": 128000,\n \"gpt-4-vision-preview\": 128000,\n}\nLOCAL_MODELS = {\n \"chatglm3-6b\": \"THUDM/chatglm3-6b\",\n \"llama-2-7b-chat\": \"TheBloke/Llama-2-7B-Chat-GPTQ\",\n \"yi-6b-chat-8bits\": \"01-ai/Yi-6B-Chat-8bits\",\n \"yi-6b-chat\": \"01-ai/Yi-6B-Chat\",\n}\nMODELS = ONLINE_MODELS + list(LOCAL_MODELS.keys())\nDEFAULT_MODEL = 0\nTOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。\nDEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限\nREDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。\nREPLY_LANGUAGES = [\n \"简体中文\",\n \"繁體中文\",\n \"English\",\n \"日本語\",\n \"Español\",\n \"Français\",\n \"Deutsch\",\n \"跟随问题语言(不稳定)\"\n]\nHISTORY_NAME_METHODS = [\n i18n(\"根据日期时间\"),\n i18n(\"第一条提问\"),\n i18n(\"模型自动总结(消耗tokens)\"),\n]\nWEBSEARCH_PTOMPT_TEMPLATE = \"\"\"\\\nWeb search results:\n\n{web_results}\nCurrent date: {current_date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. 
If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: {query}\nReply in {reply_language}\n\"\"\"\nPROMPT_TEMPLATE = \"\"\"\\\nContext information is below.\n---------------------\n{context_str}\n---------------------\nCurrent date: {current_date}.\nUsing the provided context information, write a comprehensive reply to the given query.\nMake sure to cite results using [number] notation after the reference.\nIf the provided context information refer to multiple subjects with the same name, write separate answers for each subject.\nUse prior knowledge only if the given context didn't provide enough information.\nAnswer the question: {query_str}\nReply in {reply_language}\n\"\"\"\nREFINE_TEMPLATE = \"\"\"\\\nThe original question is as follows: {query_str}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer\n(only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better\nReply in {reply_language}\nIf the context isn't useful, return the original answer.\n\"\"\"\nSUMMARIZE_PROMPT = \"\"\"Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN 中文:\"\"\"\nSUMMARY_CHAT_SYSTEM_PROMPT = \"\"\"\\\nPlease summarize the following conversation for a chat topic.\nNo more than 16 characters.\nNo special characters.\nPunctuation mark is banned.\nNot including '.' ':' '?' '!' '“' '*' '<' '>'.\nReply in user's language.\n\"\"\"\nALREADY_CONVERTED_MARK = \"<!-- ALREADY CONVERTED BY PARSER. -->\"\nSTART_OF_OUTPUT_MARK = \"<!-- SOO IN MESSAGE -->\"\nEND_OF_OUTPUT_MARK = \"<!-- EOO IN MESSAGE -->\"" }, { "identifier": "convert_bot_before_marked", "path": "src/utils.py", "snippet": "def convert_bot_before_marked(chat_message):\n \"\"\"\n 注意不能给输出加缩进, 否则会被marked解析成代码块\n \"\"\"\n if '<div class=\"md-message\">' in chat_message:\n return chat_message\n else:\n raw = f'<div class=\"raw-message hideM\">{clip_rawtext(chat_message)}</div>'\n # really_raw = f'{START_OF_OUTPUT_MARK}<div class=\"really-raw hideM\">{clip_rawtext(chat_message, need_escape=False)}\\n</div>{END_OF_OUTPUT_MARK}'\n\n code_block_pattern = re.compile(r\"```(.*?)(?:```|$)\", re.DOTALL)\n code_blocks = code_block_pattern.findall(chat_message)\n non_code_parts = code_block_pattern.split(chat_message)[::2]\n result = []\n for non_code, code in zip(non_code_parts, code_blocks + [\"\"]):\n if non_code.strip():\n result.append(non_code)\n if code.strip():\n code = f\"\\n```{code}\\n```\"\n result.append(code)\n result = \"\".join(result)\n md = f'<div class=\"md-message\">\\n\\n{result}\\n</div>'\n return raw + md" }, { "identifier": "convert_user_before_marked", "path": "src/utils.py", "snippet": "def convert_user_before_marked(chat_message):\n if '<div class=\"user-message\">' in chat_message:\n return chat_message\n else:\n return f'<div class=\"user-message\">{escape_markdown(chat_message)}</div>'" } ]
import os import gradio as gr from collections import namedtuple from gradio.utils import validate_url from gradio_client import utils as client_utils from src.presets import chuanhu_path, assets_path from src.utils import convert_bot_before_marked, convert_user_before_marked
3,116
def postprocess( self, y, ): """ Parameters: y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. Returns: List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. """ if y is None: return [] processed_messages = [] for message_pair in y: assert isinstance( message_pair, (tuple, list) ), f"Expected a list of lists or list of tuples. Received: {message_pair}" assert ( len(message_pair) == 2 ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}" processed_messages.append( [ self._postprocess_chat_messages(message_pair[0], "user"), self._postprocess_chat_messages(message_pair[1], "bot"), ] ) return processed_messages def postprocess_chat_messages( self, chat_message, role: str ): if chat_message is None: return None elif isinstance(chat_message, (tuple, list)): file_uri = chat_message[0] if validate_url(file_uri): filepath = file_uri else: filepath = self.make_temp_copy_if_needed(file_uri) mime_type = client_utils.get_mimetype(filepath) return { "name": filepath, "mime_type": mime_type, "alt_text": chat_message[1] if len(chat_message) > 1 else None, "data": None, # These last two fields are filled in by the frontend "is_file": True, } elif isinstance(chat_message, str): # chat_message = inspect.cleandoc(chat_message) # escape html spaces # chat_message = chat_message.replace(" ", "&nbsp;") if role == "bot": chat_message = convert_bot_before_marked(chat_message) elif role == "user": chat_message = convert_user_before_marked(chat_message) return chat_message else: raise ValueError(f"Invalid message for Chatbot component: {chat_message}") def add_classes_to_gradio_component(comp): """ this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others code from stable-diffusion-webui <AUTOMATIC1111/stable-diffusion-webui> """ comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] if getattr(comp, 'multiselect', False): comp.elem_classes.append('multiselect') def IOComponent_init(self, *args, **kwargs): res = original_IOComponent_init(self, *args, **kwargs) add_classes_to_gradio_component(self) return res original_IOComponent_init = gr.components.IOComponent.__init__ gr.components.IOComponent.__init__ = IOComponent_init def BlockContext_init(self, *args, **kwargs): res = original_BlockContext_init(self, *args, **kwargs) add_classes_to_gradio_component(self) return res original_BlockContext_init = gr.blocks.BlockContext.__init__ gr.blocks.BlockContext.__init__ = BlockContext_init def get_html(filename): path = os.path.join(chuanhu_path, "assets", "html", filename) if os.path.exists(path): with open(path, encoding="utf8") as file: return file.read() return "" def webpath(fn):
def postprocess( self, y, ): """ Parameters: y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. Returns: List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. """ if y is None: return [] processed_messages = [] for message_pair in y: assert isinstance( message_pair, (tuple, list) ), f"Expected a list of lists or list of tuples. Received: {message_pair}" assert ( len(message_pair) == 2 ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}" processed_messages.append( [ self._postprocess_chat_messages(message_pair[0], "user"), self._postprocess_chat_messages(message_pair[1], "bot"), ] ) return processed_messages def postprocess_chat_messages( self, chat_message, role: str ): if chat_message is None: return None elif isinstance(chat_message, (tuple, list)): file_uri = chat_message[0] if validate_url(file_uri): filepath = file_uri else: filepath = self.make_temp_copy_if_needed(file_uri) mime_type = client_utils.get_mimetype(filepath) return { "name": filepath, "mime_type": mime_type, "alt_text": chat_message[1] if len(chat_message) > 1 else None, "data": None, # These last two fields are filled in by the frontend "is_file": True, } elif isinstance(chat_message, str): # chat_message = inspect.cleandoc(chat_message) # escape html spaces # chat_message = chat_message.replace(" ", "&nbsp;") if role == "bot": chat_message = convert_bot_before_marked(chat_message) elif role == "user": chat_message = convert_user_before_marked(chat_message) return chat_message else: raise ValueError(f"Invalid message for Chatbot component: {chat_message}") def add_classes_to_gradio_component(comp): """ this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others code from stable-diffusion-webui <AUTOMATIC1111/stable-diffusion-webui> """ comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] if getattr(comp, 'multiselect', False): comp.elem_classes.append('multiselect') def IOComponent_init(self, *args, **kwargs): res = original_IOComponent_init(self, *args, **kwargs) add_classes_to_gradio_component(self) return res original_IOComponent_init = gr.components.IOComponent.__init__ gr.components.IOComponent.__init__ = IOComponent_init def BlockContext_init(self, *args, **kwargs): res = original_BlockContext_init(self, *args, **kwargs) add_classes_to_gradio_component(self) return res original_BlockContext_init = gr.blocks.BlockContext.__init__ gr.blocks.BlockContext.__init__ = BlockContext_init def get_html(filename): path = os.path.join(chuanhu_path, "assets", "html", filename) if os.path.exists(path): with open(path, encoding="utf8") as file: return file.read() return "" def webpath(fn):
if fn.startswith(assets_path):
0
2023-12-27 12:14:26+00:00
4k
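overwrites.py above patches `gr.components.IOComponent.__init__` and `gr.blocks.BlockContext.__init__` by saving a reference to the original, calling it, and then appending CSS classes to the freshly built component. The same wrap-and-reassign pattern in miniature, on a toy `Widget` class of my own rather than Gradio's:

```python
class Widget:
    def __init__(self, name):
        self.name = name
        self.elem_classes = []

_original_init = Widget.__init__             # keep the original before patching

def _patched_init(self, *args, **kwargs):
    _original_init(self, *args, **kwargs)    # run the original constructor first
    # post-hook, analogous to add_classes_to_gradio_component
    self.elem_classes.append(f"gradio-{type(self).__name__.lower()}")

Widget.__init__ = _patched_init

w = Widget("chatbot")
print(w.elem_classes)                        # ['gradio-widget']
```

Because the patch delegates to the saved original, existing construction behavior is preserved and the hook runs afterwards.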
Rounak40/Fast-Torrent-Downloader
main.py
[ { "identifier": "x1337", "path": "torrents.py", "snippet": "class x1337:\r\n def __init__(self, limit=10):\r\n self.BASE_URL = \"https://1337x.unblockit.ing\"\r\n self.LIMIT = limit\r\n self.session = requests.Session()\r\n self.session.headers = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',\r\n }\r\n \r\n\r\n def get_magnet_link(self,data):\r\n response = self.session.get(data[\"url\"])\r\n magnet_link = 'magnet:?' + response.text.split('href=\"magnet:?')[1].split(\"\\\"\")[0].strip()\r\n return magnet_link\r\n\r\n def search_torrent(self,query):\r\n torrents_list = []\r\n page = 0\r\n while len(torrents_list) < self.LIMIT:\r\n\r\n page += 1\r\n formated_query = query.replace(\" \",\"+\")\r\n page_url = f\"{self.BASE_URL}/search/{formated_query}/{page}/\"\r\n response = self.session.get(page_url)\r\n soup = BeautifulSoup(response.text,\"lxml\")\r\n\r\n rows = soup.findAll(\"tr\")\r\n\r\n if len(rows) == 0:\r\n break\r\n\r\n for row in rows:\r\n temp_dict = {}\r\n for col in row.findAll(\"td\"):\r\n class_name = col.get(\"class\")[-1]\r\n if not class_name:\r\n continue\r\n\r\n if \"name\" in class_name:\r\n temp_dict[\"title\"] = col.text.strip(\"â\\xad\\x90\")\r\n link = col.findAll(\"a\")[-1].get(\"href\")\r\n if link != None:\r\n temp_dict[\"url\"] = self.BASE_URL+col.findAll(\"a\")[-1].get(\"href\")\r\n elif \"seeds\" in class_name:\r\n temp_dict[\"seeders\"] = int(col.text.strip())\r\n elif \"leeches\" in class_name:\r\n temp_dict[\"leechers\"] = int(col.text.strip())\r\n elif \"date\" in class_name:\r\n temp_dict[\"upload_date\"] = col.text.strip()\r\n elif \"mob-trial-uploader\" in class_name or \"mob-user\" in class_name or \"mob-vip\" in class_name or \"mob-uploader\" in class_name:\r\n size = col.text.strip()\r\n if \"GB\" in size:\r\n temp_dict[\"size\"] = size.split(\"GB\")[0].strip() + \" GB\"\r\n elif \"MB\" in size:\r\n temp_dict[\"size\"] = size.split(\"MB\")[0].strip() + \" MB\"\r\n else:\r\n temp_dict[\"size\"] = size\r\n elif \"trial-uploader\" in class_name or \"user\" in class_name or \"vip\" in class_name or \"uploader\" in class_name:\r\n temp_dict[\"uploader\"] = col.text.strip()\r\n \r\n if temp_dict.get(\"url\"):\r\n torrents_list.append(temp_dict)\r\n\r\n if len(torrents_list) == self.LIMIT:\r\n break\r\n return torrents_list\r" }, { "identifier": "thepiratebay", "path": "torrents.py", "snippet": "class thepiratebay:\r\n def __init__(self, limit=10):\r\n self.BASE_URL = \"https://thepiratebaye.org\"\r\n self.LIMIT = limit\r\n self.session = requests.Session()\r\n self.session.headers = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',\r\n }\r\n \r\n def get_magnet_link(self,data):\r\n trackers = [\r\n 'udp://tracker.coppersurfer.tk:6969/announce',\r\n 'udp://tracker.openbittorrent.com:6969/announce',\r\n 'udp://tracker.opentrackr.org:1337',\r\n 'udp://movies.zsw.ca:6969/announce',\r\n 'udp://tracker.dler.org:6969/announce',\r\n 'udp://opentracker.i2p.rocks:6969/announce',\r\n 'udp://open.stealth.si:80/announce',\r\n 'udp://tracker.0x.tf:6969/announce'\r\n ]\r\n\r\n name = urllib.parse.quote(data[\"title\"])\r\n ih = data[\"info_hash\"]\r\n\r\n\r\n magnet_link = f'magnet:?xt=urn:btih:{ih}&dn={name}'\r\n\r\n for tracker in trackers:\r\n magnet_link += '&tr=' + urllib.parse.quote(tracker)\r\n \r\n return magnet_link\r\n\r\n def search_torrent(self,query):\r\n torrents_list = []\r\n\r\n 
formated_query = query.replace(\" \",\"+\")\r\n page_url = f\"{self.BASE_URL}/api.php?url=/q.php?q={formated_query}&cat=\"\r\n response = self.session.get(page_url)\r\n \r\n for result in response.json():\r\n temp_dict = {\r\n \"title\": result[\"name\"],\r\n \"info_hash\": result[\"info_hash\"],\r\n \"seeders\": result[\"seeders\"],\r\n \"leechers\": result[\"leechers\"],\r\n \"upload_date\": datetime.utcfromtimestamp(int(result[\"added\"])).strftime('%Y-%m-%d'),\r\n \"size\": convert_bytes_to_gb_mb(int(result[\"size\"])),\r\n \"uploader\": result[\"username\"]\r\n }\r\n \r\n torrents_list.append(temp_dict)\r\n\r\n if len(torrents_list) == self.LIMIT:\r\n break\r\n return torrents_list\r" }, { "identifier": "torrentio", "path": "torrents.py", "snippet": "class torrentio:\r\n def __init__(self, limit=10):\r\n self.BASE_URL = \"https://v3-cinemeta.strem.io\"\r\n self.TORRENT_URL = \"https://torrentio.strem.fun\"\r\n self.LIMIT = limit\r\n self.session = requests.Session()\r\n self.session.headers = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',\r\n }\r\n\r\n def get_magnet_link(self,data):\r\n magnet_link = 'magnet:?xt=urn:btih:' + data['infoHash']\r\n return magnet_link\r\n\r\n def search_library(self,query,_type):\r\n formated_query = query.replace(\" \",\"+\")\r\n if _type == \"1\":\r\n res = self.session.get(self.BASE_URL+\"/catalog/movie/top/search=\"+formated_query+\".json\").json()\r\n elif _type == \"2\":\r\n res = self.session.get(self.BASE_URL+\"/catalog/series/top/search=\"+formated_query+\".json\").json()\r\n \r\n return res[\"metas\"][:10]\r\n\r\n def get_series_details(self,_id):\r\n res = self.session.get(self.BASE_URL+\"/meta/series/\"+_id+\".json\").json()\r\n \r\n return res[\"meta\"][\"videos\"]\r\n\r\n\r\n def search_torrent(self,_id,_type):\r\n torrents_list = []\r\n if _type == \"1\":\r\n res = self.session.get(self.TORRENT_URL+\"/stream/movie/\"+_id+\".json\").json()\r\n elif _type == \"2\":\r\n res = self.session.get(self.TORRENT_URL+\"/stream/series/\"+_id+\".json\").json()\r\n\r\n for i in res[\"streams\"]:\r\n temp = i\r\n temp[\"upload_date\"] = \"N/A\"\r\n temp['uploader'] = i['name'].replace(\"\\n\",\" \")\r\n torrents_list.append(temp)\r\n\r\n if len(torrents_list) == self.LIMIT:\r\n break\r\n \r\n\r\n return torrents_list\r" }, { "identifier": "Seedr", "path": "downloader.py", "snippet": "class Seedr:\r\n def __init__(self):\r\n self.username = config[\"seedr\"][\"username\"]\r\n self.password = config[\"seedr\"][\"password\"]\r\n\r\n self.download_location = config[\"storage\"][\"output_path\"]\r\n self.unzip = config[\"storage\"][\"unzip\"]\r\n\r\n self.loggedin = False\r\n\r\n self.BASE_URL = \"https://www.seedr.cc\"\r\n\r\n self.session = requests.Session()\r\n self.session.headers = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',\r\n }\r\n self.login()\r\n\r\n def login(self):\r\n \r\n response = self.session.post(self.BASE_URL+'/auth/login', json={\r\n 'username': self.username,\r\n 'password': self.password,\r\n 'g-recaptcha-response': '',\r\n 'h-captcha-response': '',\r\n 'rememberme': 'on',\r\n })\r\n\r\n if response.status_code == 400 :\r\n print(f\"Login failed in seedr: {response.json()['reason_phrase']}\")\r\n elif response.status_code == 200:\r\n print(\"Logged in successfully in Seedr.\")\r\n self.loggedin = True\r\n else:\r\n print(f\"Error: {response.json()}\")\r\n \r\n 
def download(self,magnet_link):\r\n task_response = self.add_magnet_link(magnet_link)\r\n\r\n if task_response.get(\"reason_phrase\") == \"not_enough_space_added_to_wishlist\":\r\n print(\"Failed Reason: not_enough_space\")\r\n else:\r\n \r\n title = task_response[\"title\"]\r\n\r\n tries = 0\r\n failed = False\r\n print(\"Please wait...\")\r\n while self.is_downloading():\r\n\r\n if tries > 24:\r\n failed = True\r\n break\r\n\r\n tries += 1\r\n time.sleep(5)\r\n\r\n if failed:\r\n torrent_id = self.get_torrent_id(title)\r\n self.delete_torrent(torrent_id)\r\n print(\"Not able to download this try different torrent.\")\r\n else:\r\n folder_id = self.get_folder_id(title)\r\n if folder_id == -1:\r\n print(\"Failed to get download link..\")\r\n return\r\n link = self.get_download_link(folder_id)\r\n self.download_local(link)\r\n self.delete_folder(folder_id)\r\n\r\n def download_local(self,download_link):\r\n\r\n if os.path.exists(self.download_location) == False:\r\n os.mkdir(self.download_location)\r\n\r\n filename = urllib.parse.unquote(os.path.basename(urlparse(download_link).path))\r\n print(\"Saving File as :\",filename)\r\n\r\n output_path = self.download_location + \"/\" + filename\r\n\r\n with requests.get(download_link, stream=True) as r:\r\n r.raise_for_status()\r\n total_size_in_bytes= int(r.headers.get('content-length', 0))\r\n block_size = 10 * 1024 * 1024\r\n progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\r\n with open(output_path, 'wb') as f:\r\n for chunk in r.iter_content(chunk_size=block_size):\r\n progress_bar.update(len(chunk))\r\n f.write(chunk)\r\n progress_bar.close()\r\n\r\n print(\"Downloaded.\")\r\n\r\n if self.unzip:\r\n print(\"Unzippping...\")\r\n extracted_dir = self.download_location\r\n os.makedirs(extracted_dir, exist_ok=True)\r\n with zipfile.ZipFile(output_path, 'r') as zip_ref:\r\n zip_ref.extractall(extracted_dir)\r\n\r\n os.remove(output_path)\r\n\r\n print(f\"Files extracted.\")\r\n\r\n def delete_torrent(self,torrent_id):\r\n response = self.session.post(self.BASE_URL+'/fs/batch/delete', data={\r\n 'delete_arr': '[{\"type\":\"torrent\",\"id\":\"'+torrent_id+'\"}]'\r\n })\r\n return response\r\n\r\n def delete_folder(self,folder_id):\r\n response = self.session.post(self.BASE_URL+'/fs/batch/delete', data={\r\n 'delete_arr': '[{\"type\":\"folder\",\"id\":\"'+folder_id+'\"}]'\r\n })\r\n return response\r\n\r\n def get_torrent_id(self,title):\r\n items = self.get_items()\r\n for torrent in items[\"torrents\"]:\r\n if torrent['name'] == title:\r\n return str(torrent['id'])\r\n try:\r\n return str(items[\"torrents\"][-1]['id'])\r\n except:\r\n return -1\r\n\r\n\r\n def get_folder_id(self,title):\r\n items = self.get_items()\r\n for folder in items[\"folders\"]:\r\n if folder['path'] == title:\r\n return str(folder['id'])\r\n return -1\r\n\r\n\r\n def get_download_link(self,folder_id):\r\n response = self.session.post(self.BASE_URL+'/download/archive', data={\r\n 'archive_arr[0][type]': 'folder',\r\n 'archive_arr[0][id]': folder_id\r\n })\r\n return response.json()[\"url\"]\r\n \r\n\r\n def get_items(self):\r\n response = self.session.get(self.BASE_URL+'/fs/folder/0/items')\r\n return response.json()\r\n\r\n def is_downloading(self):\r\n return len(self.get_items()[\"torrents\"]) != 0\r\n\r\n def add_magnet_link(self,magnet):\r\n response = self.session.post(self.BASE_URL+'/task', data={\r\n 'folder_id': '0',\r\n 'type': 'torrent',\r\n 'torrent_magnet': magnet\r\n })\r\n\r\n return response.json()\r" } ]
from torrents import x1337, thepiratebay, torrentio from downloader import Seedr
3,179
class Provider: def __init__(self) -> None: self.limit = 10 self.provider_name = "Torrentio"
class Provider: def __init__(self) -> None: self.limit = 10 self.provider_name = "Torrentio"
self.provider = torrentio(limit=self.limit)
2
2023-12-24 13:50:46+00:00
4k
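`thepiratebay.get_magnet_link` in the record above assembles a magnet URI from the torrent's info hash, a percent-encoded display name, and one `tr` parameter per tracker. A compact sketch of that assembly; `build_magnet` is my helper name, and the hash and tracker below are placeholders:

```python
import urllib.parse

def build_magnet(info_hash: str, name: str, trackers: list) -> str:
    """magnet:?xt=urn:btih:<hash>&dn=<name>&tr=<tracker>..."""
    link = f"magnet:?xt=urn:btih:{info_hash}&dn={urllib.parse.quote(name)}"
    for tracker in trackers:
        link += "&tr=" + urllib.parse.quote(tracker)   # trackers must be URL-encoded
    return link

print(build_magnet(
    "c12fe1c06bba254a9dc9f519b335aa7c1367a88a",        # placeholder info hash
    "Example Torrent",
    ["udp://tracker.opentrackr.org:1337"],
))
```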
ConnectAI-E/GitMaya
server/tasks/lark/pull_request.py
[ { "identifier": "get_bot_by_application_id", "path": "server/tasks/lark/base.py", "snippet": "def get_bot_by_application_id(app_id):\n application = (\n db.session.query(IMApplication)\n .filter(\n or_(\n IMApplication.app_id == app_id,\n IMApplication.id == app_id,\n )\n )\n .first()\n )\n if application:\n return (\n Bot(\n app_id=application.app_id,\n app_secret=application.app_secret,\n ),\n application,\n )\n return None, None" }, { "identifier": "get_git_object_by_message_id", "path": "server/tasks/lark/base.py", "snippet": "def get_git_object_by_message_id(message_id):\n \"\"\"\n 根据message_id区分Repo、Issue、PullRequest对象\n\n 参数:\n message_id:消息ID\n\n 返回值:\n repo:Repo对象,如果存在\n issue:Issue对象,如果存在\n pr:PullRequest对象,如果存在\n \"\"\"\n issue = (\n db.session.query(Issue)\n .filter(\n Issue.message_id == message_id,\n )\n .first()\n )\n if issue:\n return None, issue, None\n pr = (\n db.session.query(PullRequest)\n .filter(\n PullRequest.message_id == message_id,\n )\n .first()\n )\n if pr:\n return None, None, pr\n repo = (\n db.session.query(Repo)\n .filter(\n Repo.message_id == message_id,\n )\n .first()\n )\n if repo:\n return repo, None, None\n\n return None, None, None" }, { "identifier": "with_authenticated_github", "path": "server/tasks/lark/base.py", "snippet": "def with_authenticated_github():\n def decorate(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"\n 1. 这个装饰器用来统一处理错误消息\n 2. github rest api调用出错的时候抛出异常\n 3. 这个装饰器捕获特定的异常,给操作者特定的报错消息\n \"\"\"\n try:\n return func(*args, **kwargs)\n except GitHubPermissionError as e:\n try:\n from .manage import send_manage_fail_message\n\n app_id, message_id, content, raw_message = args[-4:]\n host = os.environ.get(\"DOMAIN\")\n send_manage_fail_message(\n f\"[请点击绑定 GitHub 账号后重试]({host}/api/github/oauth)\",\n app_id,\n message_id,\n content,\n raw_message,\n )\n except Exception as e:\n logging.error(e)\n except Exception as e:\n raise e\n\n return wrapper\n\n return decorate" } ]
import json import logging from celery_app import app, celery from connectai.lark.sdk import FeishuTextMessage from model.schema import ( ChatGroup, CodeApplication, CodeUser, IMUser, PullRequest, Repo, Team, TeamMember, db, ) from model.team import get_assignees_by_openid from utils.github.repo import GitHubAppRepo from utils.lark.pr_card import PullCard from utils.lark.pr_manual import ( PrManual, PullRequestDiff, PullRequestLog, PullRequestView, ) from utils.lark.pr_tip_failed import PrTipFailed from utils.lark.pr_tip_success import PrTipSuccess from .base import ( get_bot_by_application_id, get_git_object_by_message_id, with_authenticated_github, )
3,039
.filter( ChatGroup.repo_id == pr.repo_id, ) .first() ) if chat_group and pr.message_id: bot, _ = get_bot_by_application_id(chat_group.im_application_id) result = bot.reply( pr.message_id, FeishuTextMessage(f"@{user_name}: {comment}"), ).json() return result return False @celery.task() def update_pull_request_card(pr_id: str) -> bool | dict: """Update PullRequest card message. Args: pr_id (str): PullRequest.id. Returns: bool | dict: True or False or FeishuMessage """ pr = db.session.query(PullRequest).filter(PullRequest.id == pr_id).first() if pr: chat_group = ( db.session.query(ChatGroup) .filter( ChatGroup.repo_id == pr.repo_id, ) .first() ) repo = db.session.query(Repo).filter(Repo.id == pr.repo_id).first() if chat_group and repo: bot, application = get_bot_by_application_id(chat_group.im_application_id) team = db.session.query(Team).filter(Team.id == application.team_id).first() if application and team: repo_url = f"https://github.com/{team.name}/{repo.name}" message = gen_pr_card_by_pr(pr, repo_url, team) result = bot.update(pr.message_id, message).json() return result return False def _get_github_app(app_id, message_id, content, data, *args, **kwargs): root_id = data["event"]["message"].get( "root_id", data["event"]["message"]["message_id"] ) openid = data["event"]["sender"]["sender_id"]["open_id"] _, _, pr = get_git_object_by_message_id(root_id) if not pr: return send_pull_request_failed_tip( "找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs ) repo = ( db.session.query(Repo) .filter( Repo.id == pr.repo_id, Repo.status == 0, ) .first() ) if not repo: return send_pull_request_failed_tip( "找不到项目", app_id, message_id, content, data, *args, **kwargs ) code_application = ( db.session.query(CodeApplication) .filter( CodeApplication.id == repo.application_id, ) .first() ) if not code_application: return send_pull_request_failed_tip( "找不到对应的项目", app_id, message_id, content, data, *args, **kwargs ) team = ( db.session.query(Team) .filter( Team.id == code_application.team_id, ) .first() ) if not team: return send_pull_request_failed_tip( "找不到对应的项目", app_id, message_id, content, data, *args, **kwargs ) code_user_id = ( db.session.query(CodeUser.user_id) .join( TeamMember, TeamMember.code_user_id == CodeUser.id, ) .join( IMUser, IMUser.id == TeamMember.im_user_id, ) .filter( IMUser.openid == openid, TeamMember.team_id == team.id, ) .limit(1) .scalar() ) github_app = GitHubAppRepo(code_application.installation_id, user_id=code_user_id) return github_app, team, repo, pr, root_id, openid @celery.task()
@celery.task() def send_pull_request_failed_tip( content, app_id, message_id, *args, bot=None, **kwargs ): """Send a new failure tip card message to the user. Args: app_id: IMApplication.app_id. message_id: lark message id. content: error message """ if not bot: bot, _ = get_bot_by_application_id(app_id) message = PrTipFailed(content=content) return bot.reply(message_id, message).json() @celery.task() def send_pull_request_success_tip( content, app_id, message_id, *args, bot=None, **kwargs ): """Send a new success tip card message to the user. Args: app_id: IMApplication.app_id. message_id: lark message id. content: success message """ if not bot: bot, _ = get_bot_by_application_id(app_id) message = PrTipSuccess(content=content) return bot.reply(message_id, message).json() def gen_pr_card_by_pr(pr: PullRequest, repo_url, team, maunal=False): assignees = pr.extra.get("assignees", []) reviewers = pr.extra.get("requested_reviewers", []) if len(assignees): assignees = [ openid for openid, in db.session.query(IMUser.openid) .join(TeamMember, TeamMember.im_user_id == IMUser.id) .join( CodeUser, CodeUser.id == TeamMember.code_user_id, ) .filter( TeamMember.team_id == team.id, CodeUser.name.in_([assignee["login"] for assignee in assignees]), ) .all() ] if len(reviewers): reviewers = [ openid for openid, in db.session.query(IMUser.openid) .join(TeamMember, TeamMember.im_user_id == IMUser.id) .join( CodeUser, CodeUser.id == TeamMember.code_user_id, ) .filter( TeamMember.team_id == team.id, CodeUser.name.in_([reviewer["login"] for reviewer in reviewers]), ) .all() ] labels = [i["name"] for i in pr.extra.get("labels", [])] status = pr.extra.get("state", "open") merged = pr.extra.get("merged") if status == "open": status = "待完成" elif status == "closed": status = "已关闭" if maunal: return PrManual( repo_url=repo_url, pr_id=pr.pull_request_number, persons=[], # never actually used assignees=assignees, tags=labels, merged=merged, ) return PullCard( repo_url=repo_url, id=pr.pull_request_number, title=pr.title, description=pr.description, base=pr.extra.get("base", {}), head=pr.extra.get("head", {}), status=status, merged=merged, persons=[], # TODO: should be everyone with write access assignees=assignees, reviewers=reviewers, labels=labels, updated=pr.modified.strftime("%Y-%m-%d %H:%M:%S"), ) @celery.task() def send_pull_request_manual(app_id, message_id, content, data, *args, **kwargs): root_id = data["event"]["message"]["root_id"] _, _, pr = get_git_object_by_message_id(root_id) if not pr: return send_pull_request_failed_tip( "找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs ) repo = ( db.session.query(Repo) .filter( Repo.id == pr.repo_id, Repo.status == 0, ) .first() ) if not repo: return send_pull_request_failed_tip( "找不到项目", app_id, message_id, content, data, *args, **kwargs ) bot, application = get_bot_by_application_id(app_id) if not application: return send_pull_request_failed_tip( "找不到对应的应用", app_id, message_id, content, data, *args, bot=bot, **kwargs ) team = ( db.session.query(Team) .filter( Team.id == application.team_id, ) .first() ) if not team: return send_pull_request_failed_tip( "找不到对应的项目", app_id, message_id, content, data, *args, bot=bot, **kwargs ) repo_url = f"https://github.com/{team.name}/{repo.name}" message = gen_pr_card_by_pr(pr, repo_url, team, maunal=True) # reply inside the thread return bot.reply(message_id, message).json() def send_pull_request_url_message( app_id, message_id, content, data, *args, typ="view", **kwargs ): root_id = data["event"]["message"]["root_id"] _, _, pr = get_git_object_by_message_id(root_id) if not pr: return
send_pull_request_failed_tip( "找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs ) repo = ( db.session.query(Repo) .filter( Repo.id == pr.repo_id, Repo.status == 0, ) .first() ) if not repo: return send_pull_request_failed_tip( "找不到项目", app_id, message_id, content, data, *args, **kwargs ) bot, application = get_bot_by_application_id(app_id) if not application: return send_pull_request_failed_tip( "找不到对应的应用", app_id, message_id, content, data, *args, bot=bot, **kwargs ) team = ( db.session.query(Team) .filter( Team.id == application.team_id, ) .first() ) if not team: return send_pull_request_failed_tip( "找不到对应的项目", app_id, message_id, content, data, *args, bot=bot, **kwargs ) repo_url = f"https://github.com/{team.name}/{repo.name}" if "view" == typ: message = PullRequestView( repo_url=repo_url, pr_id=pr.pull_request_number, ) elif "log" == typ: message = PullRequestLog( repo_url=repo_url, pr_id=pr.pull_request_number, ) elif "diff" == typ: message = PullRequestDiff( repo_url=repo_url, pr_id=pr.pull_request_number, ) else: return send_pull_request_failed_tip( "找不到对应的项目", app_id, message_id, content, data, *args, bot=bot, **kwargs ) # reply inside the thread return bot.reply(message_id, message).json() @celery.task() def send_pull_request_view_message(app_id, message_id, content, data, *args, **kwargs): return send_pull_request_url_message( app_id, message_id, content, data, *args, typ="view", **kwargs ) @celery.task() def send_pull_request_log_message(app_id, message_id, content, data, *args, **kwargs): return send_pull_request_url_message( app_id, message_id, content, data, *args, typ="log", **kwargs ) @celery.task() def send_pull_request_diff_message(app_id, message_id, content, data, *args, **kwargs): return send_pull_request_url_message( app_id, message_id, content, data, *args, typ="diff", **kwargs ) @celery.task() def send_pull_request_card(pull_request_id: str, assignees: list[str] = []): """Send a new PullRequest card message to the user. Args: pull_request_id: PullRequest.id. """ pr = db.session.query(PullRequest).filter(PullRequest.id == pull_request_id).first() if pr: chat_group = ( db.session.query(ChatGroup) .filter( ChatGroup.repo_id == pr.repo_id, ) .first() ) repo = db.session.query(Repo).filter(Repo.id == pr.repo_id).first() if chat_group and repo: bot, application = get_bot_by_application_id(chat_group.im_application_id) team = db.session.query(Team).filter(Team.id == application.team_id).first() if application and team: repo_url = f"https://github.com/{team.name}/{repo.name}" message = gen_pr_card_by_pr(pr, repo_url, team) result = bot.send( chat_group.chat_id, message, receive_id_type="chat_id" ).json() message_id = result.get("data", {}).get("message_id") if message_id: # save message_id pr.message_id = message_id db.session.commit() users = ( "".join( [f'<at user_id="{open_id}"></at>' for open_id in assignees] ) if len(assignees) else "" ) first_message_result = bot.reply( message_id, # TODO: for the first message in the thread, just post the repo_url FeishuTextMessage( f"{users}{repo_url}/pull/{pr.pull_request_number}" ), reply_in_thread=True, ).json() logging.info("debug first_message_result %r", first_message_result) return result return False @celery.task() def send_pull_request_comment(pull_request_id, comment, user_name: str): """Send a new pull request comment message to the user. Args: pull_request_id: PullRequest.id.
comment: str """ pr = db.session.query(PullRequest).filter(PullRequest.id == pull_request_id).first() if pr: chat_group = ( db.session.query(ChatGroup) .filter( ChatGroup.repo_id == pr.repo_id, ) .first() ) if chat_group and pr.message_id: bot, _ = get_bot_by_application_id(chat_group.im_application_id) result = bot.reply( pr.message_id, FeishuTextMessage(f"@{user_name}: {comment}"), ).json() return result return False @celery.task() def update_pull_request_card(pr_id: str) -> bool | dict: """Update PullRequest card message. Args: pr_id (str): PullRequest.id. Returns: bool | dict: True or False or FeishuMessage """ pr = db.session.query(PullRequest).filter(PullRequest.id == pr_id).first() if pr: chat_group = ( db.session.query(ChatGroup) .filter( ChatGroup.repo_id == pr.repo_id, ) .first() ) repo = db.session.query(Repo).filter(Repo.id == pr.repo_id).first() if chat_group and repo: bot, application = get_bot_by_application_id(chat_group.im_application_id) team = db.session.query(Team).filter(Team.id == application.team_id).first() if application and team: repo_url = f"https://github.com/{team.name}/{repo.name}" message = gen_pr_card_by_pr(pr, repo_url, team) result = bot.update(pr.message_id, message).json() return result return False def _get_github_app(app_id, message_id, content, data, *args, **kwargs): root_id = data["event"]["message"].get( "root_id", data["event"]["message"]["message_id"] ) openid = data["event"]["sender"]["sender_id"]["open_id"] _, _, pr = get_git_object_by_message_id(root_id) if not pr: return send_pull_request_failed_tip( "找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs ) repo = ( db.session.query(Repo) .filter( Repo.id == pr.repo_id, Repo.status == 0, ) .first() ) if not repo: return send_pull_request_failed_tip( "找不到项目", app_id, message_id, content, data, *args, **kwargs ) code_application = ( db.session.query(CodeApplication) .filter( CodeApplication.id == repo.application_id, ) .first() ) if not code_application: return send_pull_request_failed_tip( "找不到对应的项目", app_id, message_id, content, data, *args, **kwargs ) team = ( db.session.query(Team) .filter( Team.id == code_application.team_id, ) .first() ) if not team: return send_pull_request_failed_tip( "找不到对应的项目", app_id, message_id, content, data, *args, **kwargs ) code_user_id = ( db.session.query(CodeUser.user_id) .join( TeamMember, TeamMember.code_user_id == CodeUser.id, ) .join( IMUser, IMUser.id == TeamMember.im_user_id, ) .filter( IMUser.openid == openid, TeamMember.team_id == team.id, ) .limit(1) .scalar() ) github_app = GitHubAppRepo(code_application.installation_id, user_id=code_user_id) return github_app, team, repo, pr, root_id, openid @celery.task()
@with_authenticated_github()
2
2023-12-22 02:43:21+00:00
4k
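A minimal, self-contained sketch of the unified error-handling pattern that with_authenticated_github implements in this record: a known exception raised inside a task is converted into a user-facing failure tip instead of propagating as a traceback. The names GitHubPermissionError, notify_failure, with_error_tips, and merge_pull_request here are illustrative stand-ins, not the repository's real helpers.

from functools import wraps
import logging

class GitHubPermissionError(Exception):
    """Stand-in for the project's permission error raised on failed REST calls."""

def notify_failure(message: str) -> None:
    # Stand-in for send_manage_fail_message(); here we only log the tip.
    logging.error("user-facing failure tip: %s", message)

def with_error_tips(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except GitHubPermissionError:
            # Turn the known API error into a friendly tip instead of a crash.
            notify_failure("Please bind your GitHub account and retry")
    return wrapper

@with_error_tips
def merge_pull_request(pr_number: int) -> None:
    raise GitHubPermissionError(f"no write access for PR #{pr_number}")

if __name__ == "__main__":
    merge_pull_request(42)  # logs the tip instead of raising

Note the extra call layer: the record's with_authenticated_github() is a zero-argument factory that returns the real decorator, which is why call sites apply it as @with_authenticated_github().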
camenduru/AnyDoor-online-hf
dinov2/dinov2/layers/block.py
[ { "identifier": "Attention", "path": "dinov2/dinov2/layers/attention.py", "snippet": "class Attention(nn.Module):\n def __init__(\n self,\n dim: int,\n num_heads: int = 8,\n qkv_bias: bool = False,\n proj_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n ) -> None:\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = head_dim**-0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim, bias=proj_bias)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x: Tensor) -> Tensor:\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\n q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]\n attn = q @ k.transpose(-2, -1)\n\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x" }, { "identifier": "MemEffAttention", "path": "dinov2/dinov2/layers/attention.py", "snippet": "class MemEffAttention(Attention):\n def forward(self, x: Tensor, attn_bias=None) -> Tensor:\n if not XFORMERS_AVAILABLE:\n assert attn_bias is None, \"xFormers is required for nested tensors usage\"\n return super().forward(x)\n\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)\n\n q, k, v = unbind(qkv, 2)\n\n if attn_bias is not None:\n self_att_op = fmha.MemoryEfficientAttentionFlashAttentionOp\n else:\n self_att_op = None\n x = memory_efficient_attention(q, k, v, attn_bias=attn_bias, op=self_att_op)\n x = x.reshape([B, N, C])\n\n x = self.proj(x)\n x = self.proj_drop(x)\n return x" }, { "identifier": "DropPath", "path": "dinov2/dinov2/layers/drop_path.py", "snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\"\"\"\n\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)" }, { "identifier": "LayerScale", "path": "dinov2/dinov2/layers/layer_scale.py", "snippet": "class LayerScale(nn.Module):\n def __init__(\n self,\n dim: int,\n init_values: Union[float, Tensor] = 1e-5,\n inplace: bool = False,\n ) -> None:\n super().__init__()\n self.inplace = inplace\n self.gamma = nn.Parameter(init_values * torch.ones(dim))\n\n def forward(self, x: Tensor) -> Tensor:\n return x.mul_(self.gamma) if self.inplace else x * self.gamma" }, { "identifier": "Mlp", "path": "dinov2/dinov2/layers/mlp.py", "snippet": "class Mlp(nn.Module):\n def __init__(\n self,\n in_features: int,\n hidden_features: Optional[int] = None,\n out_features: Optional[int] = None,\n act_layer: Callable[..., nn.Module] = nn.GELU,\n drop: float = 0.0,\n bias: bool = True,\n ) -> None:\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x" } ]
import logging import torch from typing import Callable, List, Any, Tuple, Dict from torch import nn, Tensor from .attention import Attention, MemEffAttention from .drop_path import DropPath from .layer_scale import LayerScale from .mlp import Mlp from xformers.ops import fmha from xformers.ops import scaled_index_add, index_select_cat
3,003
if self.training and self.sample_drop_ratio > 0.1: # the overhead is compensated only for a drop path rate larger than 0.1 x = drop_add_residual_stochastic_depth( x, residual_func=attn_residual_func, sample_drop_ratio=self.sample_drop_ratio, ) x = drop_add_residual_stochastic_depth( x, residual_func=ffn_residual_func, sample_drop_ratio=self.sample_drop_ratio, ) elif self.training and self.sample_drop_ratio > 0.0: x = x + self.drop_path1(attn_residual_func(x)) x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2 else: x = x + attn_residual_func(x) x = x + ffn_residual_func(x) return x def drop_add_residual_stochastic_depth( x: Tensor, residual_func: Callable[[Tensor], Tensor], sample_drop_ratio: float = 0.0, ) -> Tensor: # 1) extract subset using permutation b, n, d = x.shape sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) brange = (torch.randperm(b, device=x.device))[:sample_subset_size] x_subset = x[brange] # 2) apply residual_func to get residual residual = residual_func(x_subset) x_flat = x.flatten(1) residual = residual.flatten(1) residual_scale_factor = b / sample_subset_size # 3) add the residual x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) return x_plus_residual.view_as(x) def get_branges_scales(x, sample_drop_ratio=0.0): b, n, d = x.shape sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) brange = (torch.randperm(b, device=x.device))[:sample_subset_size] residual_scale_factor = b / sample_subset_size return brange, residual_scale_factor def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None): if scaling_vector is None: x_flat = x.flatten(1) residual = residual.flatten(1) x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) else: x_plus_residual = scaled_index_add( x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor ) return x_plus_residual attn_bias_cache: Dict[Tuple, Any] = {} def get_attn_bias_and_cat(x_list, branges=None): """ this will perform the index select, cat the tensors, and provide the attn_bias from cache """ batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list] all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list)) if all_shapes not in attn_bias_cache.keys(): seqlens = [] for b, x in zip(batch_sizes, x_list): for _ in range(b): seqlens.append(x.shape[1]) attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens) attn_bias._batch_sizes = batch_sizes attn_bias_cache[all_shapes] = attn_bias if branges is not None: cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1]) else: tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list) cat_tensors = torch.cat(tensors_bs1, dim=1) return attn_bias_cache[all_shapes], cat_tensors def drop_add_residual_stochastic_depth_list( x_list: List[Tensor], residual_func: Callable[[Tensor, Any], Tensor], sample_drop_ratio: float = 0.0, scaling_vector=None, ) -> Tensor: # 1) generate random set of indices for dropping samples in the batch branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list] branges = [s[0] for s in branges_scales] residual_scale_factors = [s[1] for s in branges_scales] # 2) get attention bias and index+concat the tensors attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges) # 3) apply residual_func to get residual, and split the result 
residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore outputs = [] for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors): outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x)) return outputs class NestedTensorBlock(Block): def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]: """ x_list contains a list of tensors to nest together and run """
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # References: # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py logger = logging.getLogger("dinov2") try: XFORMERS_AVAILABLE = True except ImportError: logger.warning("xFormers not available") XFORMERS_AVAILABLE = False class Block(nn.Module): def __init__( self, dim: int, num_heads: int, mlp_ratio: float = 4.0, qkv_bias: bool = False, proj_bias: bool = True, ffn_bias: bool = True, drop: float = 0.0, attn_drop: float = 0.0, init_values=None, drop_path: float = 0.0, act_layer: Callable[..., nn.Module] = nn.GELU, norm_layer: Callable[..., nn.Module] = nn.LayerNorm, attn_class: Callable[..., nn.Module] = Attention, ffn_layer: Callable[..., nn.Module] = Mlp, ) -> None: super().__init__() # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}") self.norm1 = norm_layer(dim) self.attn = attn_class( dim, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, attn_drop=attn_drop, proj_drop=drop, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = ffn_layer( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, bias=ffn_bias, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.sample_drop_ratio = drop_path def forward(self, x: Tensor) -> Tensor: def attn_residual_func(x: Tensor) -> Tensor: return self.ls1(self.attn(self.norm1(x))) def ffn_residual_func(x: Tensor) -> Tensor: return self.ls2(self.mlp(self.norm2(x))) if self.training and self.sample_drop_ratio > 0.1: # the overhead is compensated only for a drop path rate larger than 0.1 x = drop_add_residual_stochastic_depth( x, residual_func=attn_residual_func, sample_drop_ratio=self.sample_drop_ratio, ) x = drop_add_residual_stochastic_depth( x, residual_func=ffn_residual_func, sample_drop_ratio=self.sample_drop_ratio, ) elif self.training and self.sample_drop_ratio > 0.0: x = x + self.drop_path1(attn_residual_func(x)) x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2 else: x = x + attn_residual_func(x) x = x + ffn_residual_func(x) return x def drop_add_residual_stochastic_depth( x: Tensor, residual_func: Callable[[Tensor], Tensor], sample_drop_ratio: float = 0.0, ) -> Tensor: # 1) extract subset using permutation b, n, d = x.shape sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) brange = (torch.randperm(b, device=x.device))[:sample_subset_size] x_subset = x[brange] # 2) apply residual_func to get residual residual = residual_func(x_subset) x_flat = x.flatten(1) residual = residual.flatten(1) residual_scale_factor = b / sample_subset_size # 3) add the residual x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) return x_plus_residual.view_as(x) def get_branges_scales(x, sample_drop_ratio=0.0): b, n, d = x.shape sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) brange = (torch.randperm(b, device=x.device))[:sample_subset_size] residual_scale_factor = b / sample_subset_size return brange, 
residual_scale_factor def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None): if scaling_vector is None: x_flat = x.flatten(1) residual = residual.flatten(1) x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) else: x_plus_residual = scaled_index_add( x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor ) return x_plus_residual attn_bias_cache: Dict[Tuple, Any] = {} def get_attn_bias_and_cat(x_list, branges=None): """ this will perform the index select, cat the tensors, and provide the attn_bias from cache """ batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list] all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list)) if all_shapes not in attn_bias_cache.keys(): seqlens = [] for b, x in zip(batch_sizes, x_list): for _ in range(b): seqlens.append(x.shape[1]) attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens) attn_bias._batch_sizes = batch_sizes attn_bias_cache[all_shapes] = attn_bias if branges is not None: cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1]) else: tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list) cat_tensors = torch.cat(tensors_bs1, dim=1) return attn_bias_cache[all_shapes], cat_tensors def drop_add_residual_stochastic_depth_list( x_list: List[Tensor], residual_func: Callable[[Tensor, Any], Tensor], sample_drop_ratio: float = 0.0, scaling_vector=None, ) -> Tensor: # 1) generate random set of indices for dropping samples in the batch branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list] branges = [s[0] for s in branges_scales] residual_scale_factors = [s[1] for s in branges_scales] # 2) get attention bias and index+concat the tensors attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges) # 3) apply residual_func to get residual, and split the result residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore outputs = [] for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors): outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x)) return outputs class NestedTensorBlock(Block): def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]: """ x_list contains a list of tensors to nest together and run """
assert isinstance(self.attn, MemEffAttention)
1
2023-12-25 04:48:34+00:00
4k
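The drop_add_residual_stochastic_depth helper in this record implements stochastic depth at the batch level: instead of zeroing activations, it runs the residual branch on a random subset of samples and scatters the rescaled result back with torch.index_add. A minimal runnable sketch of that idea, with a toy tanh branch standing in for the attention or MLP residual:

import torch

def drop_add_residual(x: torch.Tensor, residual_func, drop_ratio: float = 0.5) -> torch.Tensor:
    # Run the residual branch on a random subset of the batch only.
    b = x.shape[0]
    keep = max(int(b * (1 - drop_ratio)), 1)
    idx = torch.randperm(b, device=x.device)[:keep]
    residual = residual_func(x[idx])
    # Rescale by b/keep so the expected residual magnitude is unchanged,
    # then add the kept rows back into a flattened copy of x at their indices.
    out = torch.index_add(
        x.flatten(1), 0, idx, residual.flatten(1).to(x.dtype), alpha=b / keep
    )
    return out.view_as(x)

x = torch.randn(8, 4, 16)
y = drop_add_residual(x, torch.tanh, drop_ratio=0.5)
print(y.shape)  # torch.Size([8, 4, 16])

Compared with per-activation DropPath, this variant skips the branch computation entirely for dropped samples, which is why the record's Block only enables it when the drop rate exceeds 0.1: below that, the bookkeeping overhead outweighs the savings.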
yixinNB/pyscrcpy
pyscrcpy/core.py
[ { "identifier": "EVENT_DISCONNECT", "path": "pyscrcpy/const.py", "snippet": "EVENT_DISCONNECT = \"disconnect\"" }, { "identifier": "EVENT_FRAME", "path": "pyscrcpy/const.py", "snippet": "EVENT_FRAME = \"frame\"" }, { "identifier": "EVENT_INIT", "path": "pyscrcpy/const.py", "snippet": "EVENT_INIT = \"init\"" }, { "identifier": "LOCK_SCREEN_ORIENTATION_UNLOCKED", "path": "pyscrcpy/const.py", "snippet": "LOCK_SCREEN_ORIENTATION_UNLOCKED = -1" }, { "identifier": "EVENT_ONCHANGE", "path": "pyscrcpy/const.py", "snippet": "EVENT_ONCHANGE = \"onchange\"" }, { "identifier": "ControlSender", "path": "pyscrcpy/control.py", "snippet": "class ControlSender:\n def __init__(self, parent):\n self.parent = parent # client object\n self.adbutil_devices = parent.device\n\n @inject(const.TYPE_INJECT_KEYCODE)\n def keycode(\n self, keycode: int, action: int = const.ACTION_DOWN, repeat: int = 0\n ) -> bytes:\n \"\"\"\n Send keycode to device\n\n Args:\n keycode: const.KEYCODE_*\n action: ACTION_DOWN | ACTION_UP\n repeat: repeat count\n \"\"\"\n return struct.pack(\">Biii\", action, keycode, repeat, 0)\n\n @inject(const.TYPE_INJECT_TEXT)\n def text(self, text: str) -> bytes:\n \"\"\"\n Send text to device\n\n Args:\n text: text to send\n \"\"\"\n\n buffer = text.encode(\"utf-8\")\n return struct.pack(\">i\", len(buffer)) + buffer\n\n # @inject(const.TYPE_INJECT_TOUCH_EVENT)\n # def touch(self, x: int, y: int, action: int = const.ACTION_DOWN, touch_id: int = -1) -> bytes:\n # \"\"\"\n # Touch screen\n #\n # Args:\n # x: horizontal position\n # y: vertical position\n # action: ACTION_DOWN | ACTION_UP | ACTION_MOVE\n # touch_id: Default using virtual id -1, you can specify it to emulate multi finger touch\n # \"\"\"\n # x, y = max(x, 0), max(y, 0)\n # return struct.pack(\n # \">BqiiHHHi\",\n # action,\n # touch_id,\n # int(x),\n # int(y),\n # int(self.parent.resolution[0]),\n # int(self.parent.resolution[1]),\n # 0xFFFF,\n # 1,\n # )\n def touch(self, x, y):\n self.adbutil_devices.shell(f\"input tap {x} {y}\")\n\n @inject(const.TYPE_INJECT_SCROLL_EVENT)\n def scroll(self, x: int, y: int, h: int, v: int) -> bytes:\n \"\"\"\n Scroll screen\n\n Args:\n x: horizontal position\n y: vertical position\n h: horizontal movement\n v: vertical movement\n \"\"\"\n\n x, y = max(x, 0), max(y, 0)\n return struct.pack(\n \">iiHHii\",\n int(x),\n int(y),\n int(self.parent.resolution[0]),\n int(self.parent.resolution[1]),\n int(h),\n int(v),\n )\n\n @inject(const.TYPE_BACK_OR_SCREEN_ON)\n def back_or_turn_screen_on(self, action: int = const.ACTION_DOWN) -> bytes:\n \"\"\"\n If the screen is off, it is turned on only on ACTION_DOWN\n\n Args:\n action: ACTION_DOWN | ACTION_UP\n \"\"\"\n return struct.pack(\">B\", action)\n\n @inject(const.TYPE_EXPAND_NOTIFICATION_PANEL)\n def expand_notification_panel(self) -> bytes:\n \"\"\"\n Expand notification panel\n \"\"\"\n return b\"\"\n\n @inject(const.TYPE_EXPAND_SETTINGS_PANEL)\n def expand_settings_panel(self) -> bytes:\n \"\"\"\n Expand settings panel\n \"\"\"\n return b\"\"\n\n @inject(const.TYPE_COLLAPSE_PANELS)\n def collapse_panels(self) -> bytes:\n \"\"\"\n Collapse all panels\n \"\"\"\n return b\"\"\n\n def get_clipboard(self) -> str:\n \"\"\"\n Get clipboard\n \"\"\"\n # Since this function need socket response, we can't auto inject it any more\n s: socket.socket = self.parent.control_socket\n\n with self.parent.control_socket_lock:\n # Flush socket\n s.setblocking(False)\n while True:\n try:\n s.recv(1024)\n except BlockingIOError:\n break\n s.setblocking(True)\n\n # Read 
package\n package = struct.pack(\">B\", const.TYPE_GET_CLIPBOARD)\n s.send(package)\n (code,) = struct.unpack(\">B\", s.recv(1))\n assert code == 0\n (length,) = struct.unpack(\">i\", s.recv(4))\n\n return s.recv(length).decode(\"utf-8\")\n\n @inject(const.TYPE_SET_CLIPBOARD)\n def set_clipboard(self, text: str, paste: bool = False) -> bytes:\n \"\"\"\n Set clipboard\n\n Args:\n text: the string you want to set\n paste: paste now\n \"\"\"\n buffer = text.encode(\"utf-8\")\n return struct.pack(\">?i\", paste, len(buffer)) + buffer\n\n @inject(const.TYPE_SET_SCREEN_POWER_MODE)\n def set_screen_power_mode(\n self, mode: int = const.POWER_MODE_NORMAL\n ) -> bytes:\n \"\"\"\n Set screen power mode\n\n Args:\n mode: POWER_MODE_OFF | POWER_MODE_NORMAL\n \"\"\"\n return struct.pack(\">b\", mode)\n\n @inject(const.TYPE_ROTATE_DEVICE)\n def rotate_device(self) -> bytes:\n \"\"\"\n Rotate device\n \"\"\"\n return b\"\"\n\n def swipe(\n self,\n start_x: int,\n start_y: int,\n end_x: int,\n end_y: int,\n move_step_length: int = 5,\n move_steps_delay: float = 0.005,\n ) -> None:\n \"\"\"\n Swipe on screen\n\n Args:\n start_x: start horizontal position\n start_y: start vertical position\n end_x: start horizontal position\n end_y: end vertical position\n move_step_length: length per step\n move_steps_delay: sleep seconds after each step\n :return:\n \"\"\"\n\n self.touch(start_x, start_y, const.ACTION_DOWN)\n next_x = start_x\n next_y = start_y\n\n if end_x > self.parent.resolution[0]:\n end_x = self.parent.resolution[0]\n\n if end_y > self.parent.resolution[1]:\n end_y = self.parent.resolution[1]\n\n decrease_x = True if start_x > end_x else False\n decrease_y = True if start_y > end_y else False\n while True:\n if decrease_x:\n next_x -= move_step_length\n if next_x < end_x:\n next_x = end_x\n else:\n next_x += move_step_length\n if next_x > end_x:\n next_x = end_x\n\n if decrease_y:\n next_y -= move_step_length\n if next_y < end_y:\n next_y = end_y\n else:\n next_y += move_step_length\n if next_y > end_y:\n next_y = end_y\n\n self.touch(next_x, next_y, const.ACTION_MOVE)\n\n if next_x == end_x and next_y == end_y:\n self.touch(next_x, next_y, const.ACTION_UP)\n break\n time.sleep(move_steps_delay)" } ]
import os import abc import socket import struct import threading import time import numpy as np import numpy.typing as npt import cv2 as cv import cv2 from pathlib import Path from time import sleep from typing import Any, Callable, Optional, Tuple, Union from adbutils import AdbConnection, AdbDevice, AdbError, Network, adb from av.codec import CodecContext # type: ignore from av.error import InvalidDataError # type: ignore from loguru import logger from .const import EVENT_DISCONNECT, EVENT_FRAME, EVENT_INIT, LOCK_SCREEN_ORIENTATION_UNLOCKED, EVENT_ONCHANGE from .control import ControlSender
3,389
connection is alive. lock_screen_orientation: lock screen in a particular orientation. The available screen orientation are specify in const.py in variables LOCK_SCREEN_ORIENTATION* """ # Params挪到后面去 self.max_size = max_size self.bitrate = bitrate self.max_fps = max_fps self.block_frame = block_frame self.stay_awake = stay_awake self.lock_screen_orientation = lock_screen_orientation self.skip_same_frame = skip_same_frame self.min_frame_interval = 1 / max_fps if device is None: try: device = adb.device_list()[0] except IndexError: raise Exception("Cannot connect to phone") elif isinstance(device, str): device = adb.device(serial=device) self.device = device self.listeners = dict(frame=[], init=[], disconnect=[], onchange=[]) # User accessible self.last_frame: Optional[np.ndarray] = None self.resolution: Optional[Tuple[int, int]] = None self.device_name: Optional[str] = None self.control = ControlSender(self) # Need to destroy self.alive = False self.__server_stream: Optional[AdbConnection] = None self.__video_socket: Optional[socket.socket] = None self.control_socket: Optional[socket.socket] = None self.control_socket_lock = threading.Lock() def __init_server_connection(self) -> None: """ Connect to android server, there will be two sockets: video and control socket. This method will also set resolution property. """ for _ in range(30): # 超时 写死 try: self.__video_socket = self.device.create_connection( Network.LOCAL_ABSTRACT, "scrcpy" ) break except AdbError: sleep(0.1) pass else: raise ConnectionError("Failed to connect scrcpy-server after 3 seconds") dummy_byte = self.__video_socket.recv(1) if not len(dummy_byte): raise ConnectionError("Did not receive Dummy Byte!") self.control_socket = self.device.create_connection( Network.LOCAL_ABSTRACT, "scrcpy" ) self.device_name = self.__video_socket.recv(64).decode("utf-8").rstrip("\x00") if not len(self.device_name): raise ConnectionError("Did not receive Device Name!") res = self.__video_socket.recv(4) self.resolution = struct.unpack(">HH", res) self.__video_socket.setblocking(False) def __deploy_server(self) -> None: """ Deploy server to android device. Push the scrcpy-server.jar into the Android device using the adb.push(...). Then a basic connection between client and server is established. """ cmd = [ "CLASSPATH=/data/local/tmp/scrcpy-server.jar", "app_process", "/", "com.genymobile.scrcpy.Server", VERSION, # Scrcpy server version "info", # Log level: info, verbose... f"{self.max_size}", # Max screen width (long side) f"{self.bitrate}", # Bitrate of video f"{self.max_fps}", # Max frame per second f"{self.lock_screen_orientation}", # Lock screen orientation "true", # Tunnel forward "-", # Crop screen "false", # Send frame rate to client "true", # Control enabled "0", # Display id "false", # Show touches "true" if self.stay_awake else "false", # Stay awake "-", # Codec (video encoding) options "-", # Encoder name "false", # Power off screen after server closed ] self.device.push(JAR, "/data/local/tmp/") self.__server_stream: AdbConnection = self.device.shell(cmd, stream=True) def start(self, threaded: bool = False) -> None: """ Start the client-server connection. In order to avoid unpredictable behaviors, this method must be called after the on_init and on_frame callback are specify. Args: threaded : If set to True the stream loop willl run in a separated thread. This mean that the code after client.strart() will be run. Otherwise the client.start() method starts a endless loop and the code after this method will never run. 
todo new_thread """ assert self.alive is False self.__deploy_server() self.__init_server_connection() self.alive = True
Frame = npt.NDArray[np.int8] VERSION = "1.20" HERE = Path(__file__).resolve().parent JAR = HERE / f"scrcpy-server.jar" class Client: def __init__( self, device: Optional[Union[AdbDevice, str]] = None, max_size: int = 0, bitrate: int = 8000000, max_fps: int = 0, block_frame: bool = True, stay_awake: bool = True, lock_screen_orientation: int = LOCK_SCREEN_ORIENTATION_UNLOCKED, skip_same_frame=False ): """ [ok]Create a scrcpy client. The client won't be started until you call .start() Args: device: Android device to coennect to. Colud be also specify by serial string. If device is None the client try to connect to the first available device in adb deamon. max_size: Specify the maximum dimension of the video stream. This dimensioin refer both to width and hight.0: no limit[已校验, max size of width or height] bitrate: bitrate max_fps: Maximum FPS (Frame Per Second) of the video stream. If it is set to 0 it means that there is not limit to FPS. This feature is supported by android 10 or newer. [flip]: 没有这个参数, 会自动处理 block_frame: If set to true, the on_frame callbacks will be only apply on not empty frames. Otherwise try to apply on_frame callbacks on every frame, but this could raise exceptions in callbacks if they are not able to handle None value for frame. True:跳过空白帧 stay_awake: keep Android device awake while the client-server connection is alive. lock_screen_orientation: lock screen in a particular orientation. The available screen orientation are specify in const.py in variables LOCK_SCREEN_ORIENTATION* """ # Params挪到后面去 self.max_size = max_size self.bitrate = bitrate self.max_fps = max_fps self.block_frame = block_frame self.stay_awake = stay_awake self.lock_screen_orientation = lock_screen_orientation self.skip_same_frame = skip_same_frame self.min_frame_interval = 1 / max_fps if device is None: try: device = adb.device_list()[0] except IndexError: raise Exception("Cannot connect to phone") elif isinstance(device, str): device = adb.device(serial=device) self.device = device self.listeners = dict(frame=[], init=[], disconnect=[], onchange=[]) # User accessible self.last_frame: Optional[np.ndarray] = None self.resolution: Optional[Tuple[int, int]] = None self.device_name: Optional[str] = None self.control = ControlSender(self) # Need to destroy self.alive = False self.__server_stream: Optional[AdbConnection] = None self.__video_socket: Optional[socket.socket] = None self.control_socket: Optional[socket.socket] = None self.control_socket_lock = threading.Lock() def __init_server_connection(self) -> None: """ Connect to android server, there will be two sockets: video and control socket. This method will also set resolution property. """ for _ in range(30): # 超时 写死 try: self.__video_socket = self.device.create_connection( Network.LOCAL_ABSTRACT, "scrcpy" ) break except AdbError: sleep(0.1) pass else: raise ConnectionError("Failed to connect scrcpy-server after 3 seconds") dummy_byte = self.__video_socket.recv(1) if not len(dummy_byte): raise ConnectionError("Did not receive Dummy Byte!") self.control_socket = self.device.create_connection( Network.LOCAL_ABSTRACT, "scrcpy" ) self.device_name = self.__video_socket.recv(64).decode("utf-8").rstrip("\x00") if not len(self.device_name): raise ConnectionError("Did not receive Device Name!") res = self.__video_socket.recv(4) self.resolution = struct.unpack(">HH", res) self.__video_socket.setblocking(False) def __deploy_server(self) -> None: """ Deploy server to android device. Push the scrcpy-server.jar into the Android device using the adb.push(...). 
Then a basic connection between client and server is established. """ cmd = [ "CLASSPATH=/data/local/tmp/scrcpy-server.jar", "app_process", "/", "com.genymobile.scrcpy.Server", VERSION, # Scrcpy server version "info", # Log level: info, verbose... f"{self.max_size}", # Max screen width (long side) f"{self.bitrate}", # Bitrate of video f"{self.max_fps}", # Max frame per second f"{self.lock_screen_orientation}", # Lock screen orientation "true", # Tunnel forward "-", # Crop screen "false", # Send frame rate to client "true", # Control enabled "0", # Display id "false", # Show touches "true" if self.stay_awake else "false", # Stay awake "-", # Codec (video encoding) options "-", # Encoder name "false", # Power off screen after server closed ] self.device.push(JAR, "/data/local/tmp/") self.__server_stream: AdbConnection = self.device.shell(cmd, stream=True) def start(self, threaded: bool = False) -> None: """ Start the client-server connection. In order to avoid unpredictable behaviors, this method must be called after the on_init and on_frame callback are specify. Args: threaded : If set to True the stream loop willl run in a separated thread. This mean that the code after client.strart() will be run. Otherwise the client.start() method starts a endless loop and the code after this method will never run. todo new_thread """ assert self.alive is False self.__deploy_server() self.__init_server_connection() self.alive = True
for func in self.listeners[EVENT_INIT]:
2
2023-12-23 12:52:58+00:00
4k
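The Client in this record stores callbacks in a plain dict mapping event names to handler lists and fires them with a loop of the same shape as the record's gold line (for func in self.listeners[EVENT_INIT]:). A stripped-down sketch of that listener registry follows; Dispatcher, add_listener, and emit are illustrative names and not part of the pyscrcpy API.

from typing import Any, Callable, Dict, List

EVENT_INIT = "init"
EVENT_FRAME = "frame"

class Dispatcher:
    def __init__(self) -> None:
        # One handler list per event name, mirroring Client.listeners.
        self.listeners: Dict[str, List[Callable]] = {EVENT_INIT: [], EVENT_FRAME: []}

    def add_listener(self, event: str, func: Callable) -> None:
        self.listeners[event].append(func)

    def emit(self, event: str, *args: Any) -> None:
        # Fire every registered handler for this event, in registration order.
        for func in self.listeners[event]:
            func(*args)

d = Dispatcher()
d.add_listener(EVENT_FRAME, lambda frame: print("got frame:", frame))
d.emit(EVENT_FRAME, "frame-0")  # prints: got frame: frame-0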
andreafailla/pix2beats
ui.py
[ { "identifier": "resize_and_convert", "path": "backend.py", "snippet": "def resize_and_convert(filename, tmpdir, n_pixels=None):\n \"\"\"\n Resize the image, convert to hsv, and save as png\n\n :param filename:\n :param tmpdir:\n :param n_pixels:\n :return:\n \"\"\"\n # Saves\n img = Image.open(filename).convert(\"RGB\")\n if n_pixels is not None:\n # Calculate the aspect ratio\n aspect_ratio = img.width / img.height\n\n # Calculate the new width based on the desired number of pixels\n new_width = int((n_pixels * aspect_ratio) ** 0.5)\n\n # Resize the image while maintaining the aspect ratio\n img = img.resize((new_width, int(new_width / aspect_ratio)))\n if not filename.startswith(tmpdir):\n img.save(f\"{tmpdir}/{filename.split('.')[0]}_resized.png\", \"PNG\")\n\n return img" }, { "identifier": "trackmaker", "path": "backend.py", "snippet": "def trackmaker(\n img, scale, key, octave, harmony, randomize_octaves, t_value, n_pixels, gain_db, drive_db, cutoff_hz,\n resonance_lad, drive_lad, delay_seconds, room_size, damping, wet_level, dry_level, width, rate_hz_chorus\n):\n # Make the scale from parameters above\n scale_to_use = get_scale(octave, key, scale)\n # Make the track!\n track, harmony = get_track_layers(img, scale=scale_to_use, t=t_value, n_pixels=n_pixels,\n randomize_octaves=randomize_octaves, harmonize=harmony)\n\n # Write the track into a file\n track_combined = np.vstack((track, harmony))\n wavfile.write('track.wav', rate=SAMPLE_RATE,\n data=track_combined.T.astype(np.float32))\n\n # Read the track\n try:\n with AudioFile('track.wav', 'r') as f:\n audio = f.read(f.frames)\n\n # Apply the pedalboard effects\n effected = apply_pb_effects(\n gain_db, drive_db, cutoff_hz, resonance_lad,\n drive_lad, delay_seconds, damping, room_size,\n wet_level, dry_level, width, rate_hz_chorus,\n audio, SAMPLE_RATE\n )\n\n # Write the audio back as a wav file:\n with AudioFile('track.wav', 'w', SAMPLE_RATE, effected.shape[0]) as f:\n f.write(effected)\n\n # Read the processed track\n with open('track.wav', 'rb') as f:\n audio_bytes = f.read()\n\n # Remove the track\n if os.path.exists('track.wav'):\n os.remove('track.wav')\n\n return audio_bytes\n except ValueError:\n return None" }, { "identifier": "rolling_title", "path": "backend.py", "snippet": "def rolling_title(placeholder, text, delay=0.05):\n \"\"\"\n Displays title with rolling effect\n Placeholder is the container where the title will be displayed\n \"\"\"\n while True:\n\n for i in range(len(text)):\n time.sleep(delay)\n placeholder.markdown(f'#### {text[:i + 1]}')\n time.sleep(1)\n for i in range(len(text)):\n time.sleep(delay)\n placeholder.markdown(f'#### {text[:len(text) - i]}')" }, { "identifier": "SCALES", "path": "constants.py", "snippet": "SCALES = {\n \"Major\": [0, 2, 4, 5, 7, 9, 11],\n \"Natural Minor\": [0, 2, 3, 5, 7, 8, 10],\n \"Dorian\": [0, 2, 3, 5, 7, 9, 10],\n \"Mixolydian\": [0, 2, 4, 5, 7, 9, 10],\n \"Aeolian\": [0, 2, 3, 5, 7, 8, 10],\n \"Phrygian\": [0, 1, 3, 5, 7, 8, 10],\n \"Lydian\": [0, 2, 4, 6, 7, 9, 11],\n \"Harmonic Minor\": [0, 2, 3, 5, 7, 8, 11],\n \"Melodic Minor\": [0, 2, 3, 5, 7, 8, 9, 10, 11],\n \"Locrian\": [0, 1, 3, 5, 6, 8, 10],\n \"Blues\": [0, 2, 3, 4, 5, 7, 9, 10, 11],\n \"Chromatic\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],\n}" }, { "identifier": "NOTES", "path": "constants.py", "snippet": "NOTES = [\"A\", \"A#\", \"B\", \"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\"]" }, { "identifier": "HARMONIES", "path": "constants.py", "snippet": "HARMONIES = {\n \"None\": 1,\n \"Major 
second\": 9 / 8,\n \"Minor third\": 6 / 5,\n \"Major third\": 5 / 4,\n \"Perfect fourth\": 4 / 3,\n \"Diatonic tritone\": 45 / 32,\n \"Perfect fifth\": 3 / 2,\n \"Minor sixth\": 8 / 5,\n \"Major sixth\": 5 / 3,\n \"Minor seventh\": 9 / 5,\n \"Major seventh\": 15 / 8,\n}" }, { "identifier": "SAMPLE_IMAGES", "path": "constants.py", "snippet": "SAMPLE_IMAGES = [\"mona_lisa.png\", \"pixel_art_landscape.png\", \"sunflower.png\"]" }, { "identifier": "PRESETS", "path": "my_presets.py", "snippet": "PRESETS = {\n 'None':\n {'scale': 'Major', 'key': 'A', 'octave': 2, 'harmony': 'None',\n 'randomize_octaves': True, 'resize_to_n_pixels': False,\n 't_value': 0.2, 'n_pixels': 64,\n 'gain_db': 0.0, 'drive_db': 0.0,\n 'cutoff_hz': 0.0, 'resonance_lad': 0.0, 'drive_lad': 1.0, 'delay_seconds': 0.0,\n 'room_size': 0.0, 'damping': 0.0, 'wet_level': 0.0, 'dry_level': 0.1, 'width': 0.0,\n 'rate_hz_chorus': 0.0},\n 'Bitcrusher': {'scale': 'Natural Minor', 'key': 'G', 'octave': 2, 'harmony': 'Perfect fifth',\n 'randomize_octaves': True, 'resize_to_n_pixels': False, 't_value': 0.1, 'n_pixels': 100,\n 'gain_db': 9.0, 'drive_db': 14.0, 'cutoff_hz': 81.0, 'resonance_lad': 0.4, 'drive_lad': 5.8,\n 'delay_seconds': 0.0, 'room_size': 0.1, 'damping': 0.0, 'wet_level': 0.0, 'dry_level': 0.3,\n 'width': 0.0, 'rate_hz_chorus': 0.0},\n 'Sleepy Silly Penguin': {\"scale\": \"Dorian\", \"key\": \"F\", \"octave\": 3, \"harmony\": \"Major third\",\n \"randomize_octaves\": False, \"t_value\": 0.22, \"n_pixels\": 143, \"gain_db\": 0.0,\n \"drive_db\": 0.0, \"cutoff_hz\": 0.0, \"resonance_lad\": 0.0, \"drive_lad\": 1.0,\n \"delay_seconds\": 0.0, \"room_size\": 0.0, \"damping\": 0.0, \"wet_level\": 0.0, \"dry_level\": 0.1,\n \"width\": 0.0, \"rate_hz_chorus\": 0.3},\n 'Underground Cave': {\"scale\": \"Mixolydian\", \"key\": \"C\", \"octave\": 2, \"harmony\": \"Major sixth\",\n \"randomize_octaves\": False, \"t_value\": 0.2, \"n_pixels\": 219, \"gain_db\": 0.0,\n \"drive_db\": 0.0, \"cutoff_hz\": 0.0, \"resonance_lad\": 0.2, \"drive_lad\": 1.0,\n \"delay_seconds\": 0.1, \"room_size\": 0.2, \"damping\": 0.3, \"wet_level\": 0.0, \"dry_level\": 0.1,\n \"width\": 0.0, \"rate_hz_chorus\": 1.4},\n 'Distorted Bass': {\"scale\": \"Aeolian\", \"key\": \"A#\", \"octave\": 1, \"harmony\": \"None\", \"randomize_octaves\": False,\n \"t_value\": 0.3, \"n_pixels\": 64, \"gain_db\": 12.0, \"drive_db\": 4.0, \"cutoff_hz\": 0.0,\n \"resonance_lad\": 0.2, \"drive_lad\": 1.0, \"delay_seconds\": 0.0, \"room_size\": 0.1,\n \"damping\": 0.0, \"wet_level\": 0.0, \"dry_level\": 0.6, \"width\": 0.0, \"rate_hz_chorus\": 0.0},\n 'Bitcrusher (re:)': {\"scale\": \"Natural Minor\", \"key\": \"G\", \"octave\": 3, \"harmony\": \"Major seventh\",\n \"randomize_octaves\": True, \"t_value\": 0.1, \"n_pixels\": 100, \"gain_db\": 9.0, \"drive_db\": 14.0,\n \"cutoff_hz\": 81.0, \"resonance_lad\": 0.4, \"drive_lad\": 5.8, \"delay_seconds\": 0.0,\n \"room_size\": 0.1, \"damping\": 0.0, \"wet_level\": 0.0, \"dry_level\": 0.3, \"width\": 0.0,\n \"rate_hz_chorus\": 0.0}\n\n}" } ]
import json # io import tempfile import streamlit as st # UI from PIL import Image # image processing from backend import resize_and_convert, trackmaker # processing from backend import rolling_title # animation from constants import SCALES, NOTES, HARMONIES, SAMPLE_IMAGES # constants from my_presets import PRESETS
3,525
def update_session_state(preset): for k, v in preset.items(): if k != "octave": st.session_state[k] = v else: octave_options = ["Low", "Mid", "High"] st.session_state[k] = octave_options[v - 1] def write_intro(): """Defines general settings and introduces the app. :return: placeholder for the rolling title """ st.set_page_config( page_title="Pix2Beats", page_icon=":musical_note:", layout="centered", initial_sidebar_state="expanded", ) st.markdown( """ <style> .stApp { background: url("https://images.unsplash.com/photo-1557695126-fa2ce36f6828?q=80&w=2670&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"); background-size: cover; background-opacity: 0; } </style>""", unsafe_allow_html=True, ) st.title(":blue[Pix]2:red[Beats]") plh = st.empty() # Display the description st.markdown( """ Welcome to :blue[Pix]2:red[Beats]—a web application at the intersection of visual art and musical expression. Harnessing the power of Artificial Intelligence, :blue[Pix]2:red[Beats] transforms your images into sounds, unlocking a fascinating synergy between the realms of visual and auditory creativity. At the heart of :blue[Pix]2:red[Beats] lies the intuition that both images and sound can be effortlessly represented as matrices of numbers. This unique foundation allows us to create a one-of-a-kind mapping between color spaces and musical scales. Choose an image, tinker with the parameters, and let :blue[Pix]2:red[Beats] do the rest :musical_note: """ ) return plh def handle_presets(): presetsel, presetupl, _ = st.columns([1, 1, 2]) with presetsel: preset_name = st.selectbox( "***Choose a preset***", PRESETS.keys(), key="preset_select", help="Tip: you can modify an existing preset by selecting it and then selecting " "*None* from this list.", ) if preset_name is not None: if preset_name != "None": update_session_state(PRESETS[preset_name]) with presetupl: uploaded_preset = st.file_uploader( "***...or upload your own!***", type=["json"] ) css = """ <style> [data-testid='stFileUploader'] { width: max-content; } [data-testid='stFileUploader'] section { padding: 0; float: left; } [data-testid='stFileUploader'] section > input + div { display: none; } [data-testid='stFileUploader'] section + div { float: right; padding-top: 0; } </style> """ st.markdown(css, unsafe_allow_html=True) if uploaded_preset is not None: preset_name = uploaded_preset.name.split(".")[0] preset = json.load(uploaded_preset) PRESETS[preset_name] = preset update_session_state(preset) def make_sidebar_and_select_file(): """ Create the sidebar for the app The sidebar lets the user select an image to use :return: the image filename """ filename = None if ( st.sidebar.radio( "Image to use", ("Use Example Image", "Upload Image"), label_visibility="hidden", ) == "Use Example Image" ):
def init_session_state(): for k, v in PRESETS["None"].items(): if k not in st.session_state: if k != "octave": st.session_state[k] = v else: octave_options = ["Low", "Mid", "High"] st.session_state[k] = octave_options[v - 1] def update_session_state(preset): for k, v in preset.items(): if k != "octave": st.session_state[k] = v else: octave_options = ["Low", "Mid", "High"] st.session_state[k] = octave_options[v - 1] def write_intro(): """Defines general settings and introduces the app. :return: placeholder for the rolling title """ st.set_page_config( page_title="Pix2Beats", page_icon=":musical_note:", layout="centered", initial_sidebar_state="expanded", ) st.markdown( """ <style> .stApp { background: url("https://images.unsplash.com/photo-1557695126-fa2ce36f6828?q=80&w=2670&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"); background-size: cover; background-opacity: 0; } </style>""", unsafe_allow_html=True, ) st.title(":blue[Pix]2:red[Beats]") plh = st.empty() # Display the description st.markdown( """ Welcome to :blue[Pix]2:red[Beats]—a web application at the intersection of visual art and musical expression. Harnessing the power of Artificial Intelligence, :blue[Pix]2:red[Beats] transforms your images into sounds, unlocking a fascinating synergy between the realms of visual and auditory creativity. At the heart of :blue[Pix]2:red[Beats] lies the intuition that both images and sound can be effortlessly represented as matrices of numbers. This unique foundation allows us to create a one-of-a-kind mapping between color spaces and musical scales. Choose an image, tinker with the parameters, and let :blue[Pix]2:red[Beats] do the rest :musical_note: """ ) return plh def handle_presets(): presetsel, presetupl, _ = st.columns([1, 1, 2]) with presetsel: preset_name = st.selectbox( "***Choose a preset***", PRESETS.keys(), key="preset_select", help="Tip: you can modify an existing preset by selecting it and then selecting " "*None* from this list.", ) if preset_name is not None: if preset_name != "None": update_session_state(PRESETS[preset_name]) with presetupl: uploaded_preset = st.file_uploader( "***...or upload your own!***", type=["json"] ) css = """ <style> [data-testid='stFileUploader'] { width: max-content; } [data-testid='stFileUploader'] section { padding: 0; float: left; } [data-testid='stFileUploader'] section > input + div { display: none; } [data-testid='stFileUploader'] section + div { float: right; padding-top: 0; } </style> """ st.markdown(css, unsafe_allow_html=True) if uploaded_preset is not None: preset_name = uploaded_preset.name.split(".")[0] preset = json.load(uploaded_preset) PRESETS[preset_name] = preset update_session_state(preset) def make_sidebar_and_select_file(): """ Create the sidebar for the app The sidebar lets the user select an image to use :return: the image filename """ filename = None if ( st.sidebar.radio( "Image to use", ("Use Example Image", "Upload Image"), label_visibility="hidden", ) == "Use Example Image" ):
filename = st.sidebar.selectbox("Choose a sample image", SAMPLE_IMAGES)
6
2023-12-30 13:12:10+00:00
4k
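One small wrinkle in this record's update_session_state and init_session_state is that presets store octave as an integer (1 to 3) while the UI widget expects a label, so the value is mapped through the list ["Low", "Mid", "High"]. A tiny sketch of that translation step, where the session_state dict is an illustrative stand-in for Streamlit's st.session_state:

octave_options = ["Low", "Mid", "High"]
preset = {"key": "A", "octave": 2, "t_value": 0.2}

session_state = {}  # stand-in for st.session_state
for k, v in preset.items():
    # Octave 1..3 becomes the matching widget label; other keys copy through.
    session_state[k] = octave_options[v - 1] if k == "octave" else v

print(session_state)  # {'key': 'A', 'octave': 'Mid', 't_value': 0.2}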
AbstractUmbra/GreatAsset
great_asset/save_file.py
[ { "identifier": "decrypt", "path": "great_asset/crypt.py", "snippet": "def decrypt(\n *, path: str | PathLike[str] | Path | None = None, data: bytes | None = None\n) -> Any: # it returns the type of file we decrypt but alas\n if not path and not data:\n raise ValueError(\"Either `path` or `data` must be provided.\")\n\n if path:\n if not isinstance(path, Path):\n path = Path(path)\n\n with path.open(\"rb\") as fp:\n read_data = fp.read()\n else:\n read_data = data\n assert read_data # guarded earlier\n\n # The initialisation vector is the first 16 bytes of the save file.\n init_vector = read_data[:16]\n # then we take the proceeding N bytes as the data\n _to_decrypt = read_data[16:]\n\n # create the decryption key from the provided data\n decryption_key = PBKDF2(CRYPTO_PASSWORD, init_vector, dkLen=16, count=100)\n\n # with the key we create the needed cipher\n cipher = AES.new(decryption_key, AES.MODE_CBC, init_vector) # type: ignore # the upstream types aren't great\n\n # and now we decrypt the data\n decrypted_data = unpad(cipher.decrypt(_to_decrypt), AES.block_size, style=\"pkcs7\")\n\n # and it's always UTF-8\n resolved_data = decrypted_data.decode(\"utf-8\")\n\n return _from_json(resolved_data)" }, { "identifier": "encrypt", "path": "great_asset/crypt.py", "snippet": "def encrypt(path: str | PathLike[str] | Path, /) -> bytes:\n if not isinstance(path, Path):\n path = Path(path)\n\n with path.open(\"rb\") as fp:\n data_to_encrypt = fp.read()\n\n # Generate a random IV (Initialization Vector)\n init_vector = Random.new().read(16)\n\n # Derive the key using PBKDF2 with SHA1 hash algorithm\n key = PBKDF2(CRYPTO_PASSWORD, init_vector, dkLen=16, count=100)\n\n # Create AES cipher object\n cipher = AES.new(key, AES.MODE_CBC, init_vector) # type: ignore # the upstream types aren't great\n\n # Pad the data with PKCS7 before encryption\n padded_data = pad(data_to_encrypt, AES.block_size, style=\"pkcs7\")\n\n # Encrypt the data\n encrypted_data = init_vector + cipher.encrypt(padded_data)\n\n return encrypted_data" }, { "identifier": "BestiaryEntry", "path": "great_asset/enums.py", "snippet": "class BestiaryEntry(Enum):\n snare_flea = 0\n bracken = 1\n thumper = 2\n eyeless_dog = 3\n hoarding_bug = 4\n hygroderes = 5\n slime = 5\n forest_keepers = 6\n giants = 6\n coil_head = 7\n spring_head = 7\n lasso_man = 8 # not implemented?\n earth_leviathan = 9\n sand_worm = 9\n jester = 10\n jack_in_the_box = 10\n spore_lizard = 11\n bunker_spider = 12\n spider = 12\n manticoil = 13\n circuit_bees = 14\n bees = 14\n roaming_locusts = 15\n locusts = 15\n baboon_hawk = 16\n nutcracker = 17\n\n @staticmethod\n def all() -> list[\"BestiaryEntry\"]:\n return list(BestiaryEntry)" }, { "identifier": "ExtraUnlock", "path": "great_asset/enums.py", "snippet": "class ExtraUnlock(Enum):\n orange_suit = 0\n green_suit = 1\n hazard_suit = 2\n pyjama_suit = 3\n purple_suit = 24\n\n @staticmethod\n def all() -> list[\"ExtraUnlock\"]:\n return list(ExtraUnlock)" }, { "identifier": "Item", "path": "great_asset/enums.py", "snippet": "class Item(Enum):\n binoculars = 0 # not yet implemented\n boom_box = 1\n cardboard_box = 2\n flashlight = 3\n jetpack = 4\n key = 5\n lockpick = 6\n handheld_monitor = 8 # not yet implemented\n pro_flashlight = 9\n shovel = 10\n flashbang = 11\n extension_ladder = 12\n tzp_inhalant = 13\n walkie_talkie = 14\n stun_gun = 15" }, { "identifier": "Moon", "path": "great_asset/enums.py", "snippet": "class Moon(Enum):\n experimentation = 0\n assurance = 1\n vow = 2\n company_building = 3\n march = 
4\n rend = 5\n dine = 6\n offense = 7\n titan = 8" }, { "identifier": "Scrap", "path": "great_asset/enums.py", "snippet": "class Scrap(Enum):\n apparatus = 7\n magic_7_ball = 16\n airhorn = 17\n bell = 18\n big_bolt = 19\n bottles = 20\n hairbrush = 21\n candy = 22\n cash_register = 23\n chemical_jug = 24\n clown_horn = 25\n large_axel = 26\n teeth = 27\n dustpan = 28\n egg_beater = 29\n v_type_engine = 30\n golden_cup = 31\n lamp = 32\n painting = 33\n plastic_fish = 34\n laser_pointer = 35\n gold_bar = 36\n hairdryer = 37\n magnifying_glass = 38\n tattered_metal_sheet = 39\n cookie_mold_pan = 40\n coffee_mug = 41\n perfume_bottle = 42\n old_phone = 43\n jar_of_pickles = 44\n pill_bottle = 45\n remote = 46\n ring = 47\n robot_toy = 48\n rubber_ducky = 49\n red_soda = 50\n steering_wheel = 51\n stop_sign = 52\n tea_kettle = 53\n toothpaste = 54\n toy_cube = 55\n bee_hive = 56\n radar_booster = 57\n yield_sign = 58\n shotgun = 59\n shotgun_shell = 60\n spray_paint = 61\n homemade_flashbang = 62\n gift_box = 63\n flask = 64\n tragedy = 65\n comedy = 66\n whoopie_cushion = 67" }, { "identifier": "ShipUnlock", "path": "great_asset/enums.py", "snippet": "class ShipUnlock(Enum):\n cozy_lights = 4, \"Cozy lights\"\n teleporter = 5, \"Teleporter\"\n television = 6, \"Television\"\n tv = 6, \"Television\"\n cupboard = 7, \"Cupboard\"\n file_cabinet = 8, \"File Cabinet\"\n toilet = 9, \"Toilet\"\n shower = 10, \"Shower\"\n light_switch = 11, \"Light switch\"\n record_player = 12, \"Record player\"\n table = 13, \"Table\"\n romantic_table = 14, \"Romantic table\"\n bunkbeds = 15, \"Bunkbeds\"\n terminal = 16, \"Terminal\"\n signal_translator = 17, \"Signal translator\"\n signal_transmitter = 17, \"Signal translator\"\n loud_horn = 18, \"Loud horn\"\n inverse_teleporter = 19, \"Inverse Teleporter\"\n jack_o_lantern = 20, \"JackOLantern\"\n welcome_mat = 21, \"Welcome mat\"\n goldfish = 22, \"Goldfish\"\n plushie_pajama_man = 23, \"Plushie pajama man\"\n plushie_pyjama_man = 23, \"Plushie pajama man\"\n\n def __init__(self, value: int, serialised_name: str) -> None:\n self._serialised_value: int = value\n self._serialised_name: str = serialised_name\n\n @property\n def serialised_value(self) -> int:\n return self._serialised_value\n\n @property\n def serialised_name(self) -> str:\n return self._serialised_name\n\n @staticmethod\n def all() -> list[\"ShipUnlock\"]:\n return list(ShipUnlock)" }, { "identifier": "GrabbableScrap", "path": "great_asset/item.py", "snippet": "class GrabbableScrap(NamedTuple):\n id: int\n value: int\n pos: InnerVectorValue" }, { "identifier": "MISSING", "path": "great_asset/utils.py", "snippet": " def _to_json(obj: Any, /) -> str:\n def _to_json(obj: Any, /) -> str:\n def __eq__(self, other: object) -> bool:\n def __bool__(self) -> bool:\n def __hash__(self) -> int:\n def __repr__(self) -> str:\ndef resolve_save_path(save_number: SaveValue, /) -> pathlib.Path:\nclass _MissingSentinel:\nMISSING: Any = _MissingSentinel()" }, { "identifier": "Vector", "path": "great_asset/vector.py", "snippet": "class Vector:\n def __init__(self, x: float, y: float, z: float) -> None:\n self.x: float = float(x)\n self.y: float = float(y)\n self.z: float = float(z)\n\n def __repr__(self) -> str:\n return f\"<Vector x={self.x} y={self.y} z={self.z}>\"\n\n @classmethod\n def default(cls) -> Vector:\n return cls(-3.5, 2.5, -12.5)\n\n @classmethod\n def in_cupboard(cls, cupboard_position: VectorValue | None = None) -> Vector:\n if cupboard_position:\n position = 
cls.from_dict(cupboard_position[\"value\"])\n return cls(\n uniform(position.x, position.x - 0.5),\n choice(SHELVES),\n uniform(position.z, position.z - 0.5),\n )\n return cls(uniform(-3.0, -3.5), choice(SHELVES), uniform(-12, -12.5))\n\n @classmethod\n def from_dict(cls, payload: InnerVectorValue) -> Vector:\n return cls(**payload)\n\n def serialise(self) -> InnerVectorValue:\n return {\"x\": self.x, \"y\": self.y, \"z\": self.z}" } ]
import random from pathlib import Path from typing import TYPE_CHECKING, Any, Generic, Self, TypeVar from .crypt import decrypt, encrypt from .enums import BestiaryEntry, ExtraUnlock, Item, Moon, Scrap, ShipUnlock from .item import GrabbableScrap from .utils import MISSING, SaveValue, _to_json, resolve_save_path # type: ignore[reportPrivateUsage] we allow this here. from .vector import Vector from os import PathLike from types import TracebackType from .types_.config_file import ConfigFile as ConfigFileType from .types_.save_file import ( SaveFile as SaveFileType, ) from .types_.shared import *
3,353
""" The MIT License (MIT) Copyright (c) 2023-present AbstractUmbra Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations if TYPE_CHECKING: SaveT = TypeVar("SaveT", "SaveFileType", "ConfigFileType") TEMP_FILE = Path("./_previously_decrypted_file.json") TIPS = [ "LC_MoveObjectsTip", "LC_StorageTip", "LC_LightningTip", "LCTip_SecureDoors", "LC_EclipseTip", "LCTip_SellScrap", "LCTip_UseManual", "LC_IntroTip1", ] __all__ = ( "SaveFile", "ConfigFile", ) class _BaseSaveFile(Generic[SaveT]): _inner_data: SaveT _file_type: str _extra_data: dict[str, Any] _written: bool _skip_parsing: bool __slots__ = ( "_inner_data", "_file_type", "_extra_data", "_written", "_skip_parsing", "path", ) def __init__(self, path: str | PathLike[str] | Path, /) -> None: self._skip_parsing = False self._written = False if not isinstance(path, Path): path = Path(path) if not path.exists(): raise ValueError("The path given does not exist") self.path: Path = path self._parse_file() @classmethod def from_data(cls, *, data: bytes, path: Path | None = None, save_number: SaveValue | None = None) -> Self: _number = save_number or "" path = path or Path(f"./LCSaveFile{_number}") file = cls.__new__(cls) file._skip_parsing = True
""" The MIT License (MIT) Copyright (c) 2023-present AbstractUmbra Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations if TYPE_CHECKING: SaveT = TypeVar("SaveT", "SaveFileType", "ConfigFileType") TEMP_FILE = Path("./_previously_decrypted_file.json") TIPS = [ "LC_MoveObjectsTip", "LC_StorageTip", "LC_LightningTip", "LCTip_SecureDoors", "LC_EclipseTip", "LCTip_SellScrap", "LCTip_UseManual", "LC_IntroTip1", ] __all__ = ( "SaveFile", "ConfigFile", ) class _BaseSaveFile(Generic[SaveT]): _inner_data: SaveT _file_type: str _extra_data: dict[str, Any] _written: bool _skip_parsing: bool __slots__ = ( "_inner_data", "_file_type", "_extra_data", "_written", "_skip_parsing", "path", ) def __init__(self, path: str | PathLike[str] | Path, /) -> None: self._skip_parsing = False self._written = False if not isinstance(path, Path): path = Path(path) if not path.exists(): raise ValueError("The path given does not exist") self.path: Path = path self._parse_file() @classmethod def from_data(cls, *, data: bytes, path: Path | None = None, save_number: SaveValue | None = None) -> Self: _number = save_number or "" path = path or Path(f"./LCSaveFile{_number}") file = cls.__new__(cls) file._skip_parsing = True
decrypted: SaveT = decrypt(data=data)
0
2023-12-25 11:03:20+00:00
4k
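The crypt snippets in this record implement a common pycryptodome scheme: a random 16-byte initialisation vector is prepended to the ciphertext and doubles as the PBKDF2 salt, with AES-CBC and PKCS7 padding on top. Below is a minimal round-trip sketch of that scheme; PASSWORD is a stand-in for the library's CRYPTO_PASSWORD constant, which this record does not show, so treat it as an assumption:

# Round-trip sketch of the save-file scheme above (pycryptodome).
# PASSWORD is a placeholder; the real constant lives in great_asset.crypt.
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Util.Padding import pad, unpad

PASSWORD = "placeholder-password"  # assumption: not the library's real value

def encrypt_bytes(plaintext: bytes) -> bytes:
    iv = Random.new().read(16)                       # IV doubles as the PBKDF2 salt
    key = PBKDF2(PASSWORD, iv, dkLen=16, count=100)  # same derivation as the record
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return iv + cipher.encrypt(pad(plaintext, AES.block_size, style="pkcs7"))

def decrypt_bytes(blob: bytes) -> bytes:
    iv, body = blob[:16], blob[16:]                  # first 16 bytes are the IV
    key = PBKDF2(PASSWORD, iv, dkLen=16, count=100)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(body), AES.block_size, style="pkcs7")

assert decrypt_bytes(encrypt_bytes(b'{"save": 1}')) == b'{"save": 1}'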
Amirtheahmed/ddd-cqrs-fastapi
src/apps/photostore/dependencies/PhotoStoreContainer.py
[ { "identifier": "StatusGetController", "path": "src/apps/backoffice/controllers/StatusGetController.py", "snippet": "class StatusGetController(BackofficeController):\n\n def __init__(self):\n pass\n\n async def run(self, req: Request) -> JSONResponse:\n return JSONResponse(status_code=HTTPStatus.OK)" }, { "identifier": "PhotoPostController", "path": "src/apps/photostore/controllers/PhotoPostController.py", "snippet": "class PhotoPostController(BackofficeController):\n\n def __init__(\n self,\n command_bus: CommandBus,\n ):\n self.__command_bus = command_bus\n self.__error_handler = JsonResponseErrorHandler()\n\n async def run(\n self,\n request: Request,\n ) -> JSONResponse:\n form = await request.form()\n body = dict(form)\n file: UploadFile = body.get('file')\n raw_data = await bytify_upload_file(file)\n command: CreatePhotoCommand = CreatePhotoCommand(body['id'], body['name'], body['user-id'], raw_data)\n try:\n await self.__command_bus.dispatch(command)\n except DomainError as err:\n return self.__error_handler.resolve(err)\n\n return JSONResponse(status_code=HTTPStatus.CREATED)" }, { "identifier": "CreatePhotoCommandHandler", "path": "src/contexts/photostore/photo/application/createone/CreatePhotoCommandHandler.py", "snippet": "class CreatePhotoCommandHandler(BaseObject, CommandHandler):\n\n __subscription: str = CreatePhotoCommand.COMMAND_TYPE\n\n def __init__(self, creator: PhotoCreator):\n self.__creator = creator\n\n def subscribed_to(self) -> str:\n return self.__subscription\n\n async def handle(self, command: CreatePhotoCommand) -> NoReturn:\n photo_id: PhotoId = PhotoId(command.id)\n photo_name: PhotoName = PhotoName(command.name)\n user_id: UserId = UserId(command.user_id)\n file: PhotoFile = PhotoFile(command.file)\n\n await self.__creator.run(photo_id, photo_name, user_id, file)" }, { "identifier": "PhotoCreator", "path": "src/contexts/photostore/photo/application/createone/PhotoCreator.py", "snippet": "class PhotoCreator:\n\n def __init__(self, photo_repository: PhotoRepository, event_bus: EventBus):\n self.__photo_repository = photo_repository\n self.__event_bus = event_bus\n\n async def run(self, photo_id: PhotoId, name: PhotoName, user_id: UserId, file: PhotoFile):\n photo: Photo = Photo.create(photo_id, name, user_id, file)\n await self.__photo_repository.create_one(photo)\n await self.__event_bus.publish(photo.pull_domain_events())" }, { "identifier": "MinioPhotoRepository", "path": "src/contexts/photostore/photo/infrastructure/persistence/MinioPhotoStorePhotoRepository.py", "snippet": "class MinioPhotoRepository(MinioRepository, PhotoRepository):\n\n __BUCKET_NAME = 'photostore'\n __DIRECTORY_DEFAULT_NAME = 'photos'\n\n def get_bucket_name(self):\n return MinioPhotoRepository.__BUCKET_NAME\n\n def get_directory_name(self):\n return MinioPhotoRepository.__DIRECTORY_DEFAULT_NAME\n\n async def create_one(self, photo: Photo) -> NoReturn:\n try:\n photo = await super()._create(\n obj_id=photo.id.value(),\n obj=photo.file.value(),\n file_extension='jpg',\n codification='base64',\n )\n return photo\n except Exception as e:\n raise PhotoAlreadyExistsError('Photo with ID <{}> already exists.'.format(photo.id.value()))" }, { "identifier": "MinioPhotoConfigFactory", "path": "src/contexts/photostore/photo/infrastructure/persistence/config/MinioPhotoConfigFactory.py", "snippet": "class MinioPhotoConfigFactory:\n\n @staticmethod\n def create() -> MinioConfiguration:\n config = MinioConfiguration(\n host=EnvManager.get(EnvVar.SHARED_PHOTO_MINIO_HOST),\n 
port=EnvManager.get(EnvVar.SHARED_PHOTO_MINIO_PORT, parser=int),\n access_key=EnvManager.get(EnvVar.SHARED_PHOTO_MINIO_ACCESS_KEY),\n secret_key=EnvManager.get(EnvVar.SHARED_PHOTO_MINIO_SECRET_KEY),\n region=EnvManager.get(EnvVar.SHARED_PHOTO_MINIO_REGION),\n secure=EnvManager.get(EnvVar.SHARED_PHOTO_MINIO_SECURE, parser=json.loads),\n )\n return config" }, { "identifier": "CreatePhotoRegistryOnPhotoCreated", "path": "src/contexts/photostore/photoregistry/application/CreatePhotoRegistryOnPhotoCreated.py", "snippet": "class CreatePhotoRegistryOnPhotoCreated(BaseObject, EventSubscriber):\n\n __SUBSCRIPTIONS = [PhotoCreatedDomainEvent.EVENT_TYPE]\n\n def __init__(self, creator: PhotoRegistryCreator, event_bus: Optional[EventBus]):\n self.__creator = creator\n if event_bus is not None:\n event_bus.add_subscribers([self])\n\n def subscribed_to(self) -> List[str]:\n return CreatePhotoRegistryOnPhotoCreated.__SUBSCRIPTIONS\n\n async def on(self, event: PhotoCreatedDomainEvent):\n photo_id: PhotoId = event.photo_id\n photo_name: PhotoName = event.photo_name\n user_id: UserId = event.user_id\n await self.__creator.run(photo_id, photo_name, user_id)" }, { "identifier": "PhotoRegistryCreator", "path": "src/contexts/photostore/photoregistry/application/PhotoRegistryCreator.py", "snippet": "class PhotoRegistryCreator:\n\n def __init__(self, photo_registry_repository: PhotoRegistryRepository, event_bus: EventBus):\n self.__photo_repository = photo_registry_repository\n self.__event_bus = event_bus\n\n async def run(self, photo_id: PhotoId, name: PhotoName, user_id: UserId):\n photo_registry: PhotoRegistry = PhotoRegistry.create(photo_id, name, user_id)\n await self.__photo_repository.create_one(photo_registry)\n await self.__event_bus.publish(photo_registry.pull_domain_events())" }, { "identifier": "PyMongoPhotoRegistryRepository", "path": "src/contexts/photostore/photoregistry/infrastructure/persistence/PyMongoPhotoRegistryRepository.py", "snippet": "class PyMongoPhotoRegistryRepository(PyMongoRepository, PhotoRegistryRepository):\n\n __COLLECTION_NAME = 'photo-registry'\n __DATABASE_NAME = 'python-ddd-example'\n\n def __init__(self, client: MongoClient):\n super().__init__(client)\n super()._get_collection().create_index([\n ('id', ASCENDING)\n ], unique=True)\n\n def get_database_name(self):\n return self.__DATABASE_NAME\n\n def get_collection_name(self):\n return self.__COLLECTION_NAME\n\n async def find_by_criteria(self, criteria: Criteria) -> Tuple[List[PhotoRegistry], Optional[CriteriaQueryMetadata]]:\n results, count = await super()._find_by_criteria(criteria)\n entities = [PhotoRegistry.create_from_primitives(result) for result in results]\n metadata = CriteriaQueryMetadata(count)\n return entities, metadata\n\n async def create_one(self, registry: PhotoRegistry) -> NoReturn:\n try:\n registry = await super()._create_one(registry.to_primitives())\n return registry\n except DuplicateKeyError as e:\n raise PhotoRegistryAlreadyExistsError('User with ID <{}> already exists.'.format(registry.id.value()))" }, { "identifier": "PyMongoPhotoRegistryConfigFactory", "path": "src/contexts/photostore/photoregistry/infrastructure/persistence/config/PyMongoPhotoRegistryConfigFactory.py", "snippet": "class PyMongoPhotoRegistryConfigFactory:\n\n @staticmethod\n def create() -> PyMongoConfiguration:\n config = PyMongoConfiguration(\n EnvManager.get(EnvVar.SHARED_PHOTO_REGISTRY_MONGO_HOST),\n EnvManager.get(EnvVar.SHARED_PHOTO_REGISTRY_MONGO_PORT, parser=int),\n )\n return config" }, { "identifier": 
"InMemoryCommandBus", "path": "src/contexts/shared/Infrastructure/commandbus/InMemoryCommandBus.py", "snippet": "class InMemoryCommandBus(BaseObject, CommandBus):\n\n def __init__(self, *handlers: CommandHandler):\n handler_mapping = {}\n for handler in handlers:\n handler_mapping[handler.subscribed_to()] = handler\n self.__handler_mapping: Dict[str, CommandHandler] = handler_mapping\n\n def __search(self, command_name: str):\n if command_name not in self.__handler_mapping:\n raise CommandNotRegisteredError()\n return self.__handler_mapping[command_name]\n\n async def dispatch(self, command: Command) -> Any:\n query_type: str = command.get_command_type_name()\n handler = self.__search(query_type)\n return await handler.handle(command)" }, { "identifier": "InMemoryEventBus", "path": "src/contexts/shared/Infrastructure/eventbus/InMemoryEventBus.py", "snippet": "class InMemoryEventBus(BaseObject, EventBus):\n\n def __init__(self, *subscribers: EventSubscriber):\n event_subscriber_mapping: Dict[str, List[EventSubscriber]] = {}\n for subscriber in subscribers:\n for event in subscriber.subscribed_to():\n if event not in event_subscriber_mapping:\n event_subscriber_mapping[event] = []\n event_subscriber_mapping[event].append(subscriber)\n self.__subscriptions = event_subscriber_mapping\n\n def start(self):\n pass\n\n async def publish(self, events: List[DomainEvent]):\n for event in events:\n event_type = event.get_event_type_name()\n if event_type not in self.__subscriptions:\n continue\n subscribers = self.__subscriptions[event_type]\n for subscriber in subscribers:\n await subscriber.on(event) # TODO: add gather or future\n\n def add_subscribers(self, subscribers: List[EventSubscriber]):\n for subscriber in subscribers:\n self.add_subscriber(subscriber)\n\n def add_subscriber(self, subscriber: EventSubscriber):\n event_types = subscriber.subscribed_to()\n for event_type in event_types:\n if event_type not in self.__subscriptions:\n self.__subscriptions[event_type] = []\n self.__subscriptions[event_type].append(subscriber)" }, { "identifier": "MinioClientFactory", "path": "src/contexts/shared/Infrastructure/persistence/minio/MinioClientFactory.py", "snippet": "class MinioClientFactory:\n\n __clients: Dict[str, Minio] = {}\n\n @staticmethod\n def __get_client(context_name: str):\n return MinioClientFactory.__clients.get(context_name)\n\n @staticmethod\n def __add_client(context_name: str, client: Minio):\n MinioClientFactory.__clients[context_name] = client\n\n @staticmethod\n def create_instance(context_name: str, config: Optional[PyMongoConfiguration] = None):\n client = MinioClientFactory.__get_client(context_name)\n if client is not None:\n return client\n\n if config is None:\n config = MinioConfiguration()\n client = config.create_client_from_config()\n MinioClientFactory.__add_client(context_name, client)\n return client" }, { "identifier": "PyMongoClientFactory", "path": "src/contexts/shared/Infrastructure/persistence/mongo/PyMongoClientFactory.py", "snippet": "class PyMongoClientFactory:\n\n __clients: Dict[str, MongoClient] = {}\n\n @staticmethod\n def __get_client(context_name: str):\n return PyMongoClientFactory.__clients.get(context_name)\n\n @staticmethod\n def __add_client(context_name: str, client: MongoClient):\n PyMongoClientFactory.__clients[context_name] = client\n\n @staticmethod\n def create_instance(context_name: str, config: Optional[PyMongoConfiguration] = None):\n client = PyMongoClientFactory.__get_client(context_name)\n if client is not None:\n return client\n\n if 
config is None:\n config = PyMongoConfiguration()\n client = config.create_client_from_config()\n PyMongoClientFactory.__add_client(context_name, client)\n return client" } ]
from dependency_injector import containers, providers from src.apps.backoffice.controllers.StatusGetController import StatusGetController from src.apps.photostore.controllers.PhotoPostController import PhotoPostController from src.contexts.photostore.photo.application.createone.CreatePhotoCommandHandler import CreatePhotoCommandHandler from src.contexts.photostore.photo.application.createone.PhotoCreator import PhotoCreator from src.contexts.photostore.photo.infrastructure.persistence.MinioPhotoStorePhotoRepository import MinioPhotoRepository from src.contexts.photostore.photo.infrastructure.persistence.config.MinioPhotoConfigFactory import \ MinioPhotoConfigFactory from src.contexts.photostore.photoregistry.application.CreatePhotoRegistryOnPhotoCreated import \ CreatePhotoRegistryOnPhotoCreated from src.contexts.photostore.photoregistry.application.PhotoRegistryCreator import PhotoRegistryCreator from src.contexts.photostore.photoregistry.infrastructure.persistence.PyMongoPhotoRegistryRepository import \ PyMongoPhotoRegistryRepository from src.contexts.photostore.photoregistry.infrastructure.persistence.config.PyMongoPhotoRegistryConfigFactory import \ PyMongoPhotoRegistryConfigFactory from src.contexts.shared.Infrastructure.commandbus.InMemoryCommandBus import InMemoryCommandBus from src.contexts.shared.Infrastructure.eventbus.InMemoryEventBus import InMemoryEventBus from src.contexts.shared.Infrastructure.persistence.minio.MinioClientFactory import MinioClientFactory from src.contexts.shared.Infrastructure.persistence.mongo.PyMongoClientFactory import PyMongoClientFactory
3,241
class PhotoStoreContainer(containers.DeclarativeContainer): event_bus = providers.Singleton( InMemoryEventBus, ) photo_minio_config = providers.Singleton(MinioPhotoConfigFactory.create) photo_minio_client = providers.Singleton(MinioClientFactory.create_instance, 'photo', photo_minio_config) photo_registry_mongo_config = providers.Singleton(PyMongoPhotoRegistryConfigFactory.create) photo_registry_mongo_client = providers.Singleton(PyMongoClientFactory.create_instance, 'photo-registry', photo_registry_mongo_config) photo_repository = providers.Singleton(MinioPhotoRepository, photo_minio_client) photo_registry_repository = providers.Singleton(PyMongoPhotoRegistryRepository, photo_registry_mongo_client) photo_creator = providers.Singleton(PhotoCreator, photo_repository, event_bus) photo_registry_creator = providers.Singleton(PhotoRegistryCreator, photo_registry_repository, event_bus) create_photo_command_handler = providers.Singleton( CreatePhotoCommandHandler, photo_creator, ) create_photo_registry_on_photo_created = providers.Singleton( CreatePhotoRegistryOnPhotoCreated, photo_registry_creator, event_bus, ) command_bus = providers.Singleton( InMemoryCommandBus, create_photo_command_handler, )
class PhotoStoreContainer(containers.DeclarativeContainer): event_bus = providers.Singleton( InMemoryEventBus, ) photo_minio_config = providers.Singleton(MinioPhotoConfigFactory.create) photo_minio_client = providers.Singleton(MinioClientFactory.create_instance, 'photo', photo_minio_config) photo_registry_mongo_config = providers.Singleton(PyMongoPhotoRegistryConfigFactory.create) photo_registry_mongo_client = providers.Singleton(PyMongoClientFactory.create_instance, 'photo-registry', photo_registry_mongo_config) photo_repository = providers.Singleton(MinioPhotoRepository, photo_minio_client) photo_registry_repository = providers.Singleton(PyMongoPhotoRegistryRepository, photo_registry_mongo_client) photo_creator = providers.Singleton(PhotoCreator, photo_repository, event_bus) photo_registry_creator = providers.Singleton(PhotoRegistryCreator, photo_registry_repository, event_bus) create_photo_command_handler = providers.Singleton( CreatePhotoCommandHandler, photo_creator, ) create_photo_registry_on_photo_created = providers.Singleton( CreatePhotoRegistryOnPhotoCreated, photo_registry_creator, event_bus, ) command_bus = providers.Singleton( InMemoryCommandBus, create_photo_command_handler, )
status_get_controller = providers.Singleton(StatusGetController)
0
2023-12-27 13:58:25+00:00
4k
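This container is a textbook dependency-injector wiring: every collaborator is a providers.Singleton, and provider objects are passed to other providers so the object graph is resolved lazily and shared. A self-contained sketch of that pattern, with illustrative names (Repo, Service, AppContainer) that are not from this repo:

# Minimal sketch of the providers.Singleton wiring used by PhotoStoreContainer.
from dependency_injector import containers, providers

class Repo:
    def save(self, item: str) -> str:
        return f"saved {item}"

class Service:
    def __init__(self, repo: Repo) -> None:
        self.repo = repo

class AppContainer(containers.DeclarativeContainer):
    repo = providers.Singleton(Repo)
    service = providers.Singleton(Service, repo)  # dependency injected by provider

container = AppContainer()
assert container.service() is container.service()    # Singleton: one instance
assert container.service().repo is container.repo()  # shared wired dependency
print(container.service().repo.save("photo"))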
smonsays/modular-hyperteacher
metax/data/dataset/teacher.py
[ { "identifier": "MultitaskDataset", "path": "metax/data/base.py", "snippet": "class MultitaskDataset(NamedTuple):\n x: Array\n y: Array\n task_id: Array\n info: Dict = dict()" }, { "identifier": "DatasetGenerator", "path": "metax/data/dataset/base.py", "snippet": "class DatasetGenerator(abc.ABC):\n \"\"\"\n Abstract base class for generated datasets.\n\n Attributes:\n input_shape (tuple): The shape of the input data.\n output_dim (int): The dimensionality of the output data.\n \"\"\"\n def __init__(self, input_shape: Tuple[int], output_dim: int) -> None:\n self.input_shape = input_shape\n self.output_dim = output_dim\n\n @abc.abstractmethod\n def sample(self, rng: chex.PRNGKey, num_tasks: int, num_samples: int, mode: str) -> Dataset:\n \"\"\"\n Generate a batch of tasks.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator to use.\n num_tasks (int): The number of tasks to generate.\n num_samples (int): The number of samples per task.\n mode (str): The mode of the generated data (e.g. 'train', 'test', 'ood').\n\n Returns:\n A namedtuple `Dataset` (x, y) containing the input and output data for the generated tasks.\n x has shape (num_tasks, num_samples) + input_shape.\n y has shape (num_tasks, num_samples, output_dim).\n \"\"\"\n pass" }, { "identifier": "MultilayerPerceptron", "path": "metax/models/mlp.py", "snippet": "class MultilayerPerceptron(hk.Module):\n def __init__(\n self,\n output_sizes: Iterable[int],\n w_init: Optional[hk.initializers.Initializer] = None,\n b_init: Optional[hk.initializers.Initializer] = None,\n with_bias: bool = True,\n activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.relu,\n activate_final: bool = False,\n batch_norm: bool = False,\n reparametrized_linear: bool = False,\n names_layers: Optional[List[str]] = None,\n name: Optional[str] = None,\n ):\n \"\"\"Constructs an MLP.\n\n Args:\n output_sizes: Sequence of layer sizes.\n w_init: Initializer for :class:`~haiku.Linear` weights.\n b_init: Initializer for :class:`~haiku.Linear` bias. Must be ``None`` if\n ``with_bias=False``.\n with_bias: Whether or not to apply a bias in each layer.\n activation: Activation function to apply between :class:`~haiku.Linear`\n layers. 
Defaults to ReLU.\n activate_final: Whether or not to activate the final layer of the MLP.\n batch_norm: Whether or not to add batch_norm after each linear layer.\n name: Optional name for this module.\n\n Raises:\n ValueError: If ``with_bias`` is ``False`` and ``b_init`` is not ``None``.\n \"\"\"\n if not with_bias and b_init is not None:\n raise ValueError(\"When with_bias=False b_init must not be set.\")\n\n super().__init__(name=name)\n self.with_bias = with_bias\n self.w_init = w_init\n self.b_init = b_init\n self.activation = activation\n self.activate_final = activate_final\n layers = []\n output_sizes = tuple(output_sizes)\n for i, output_size in enumerate(output_sizes):\n if names_layers is not None:\n name = names_layers[i]\n else:\n name = \"linear_{}\".format(i)\n\n layers.append(\n LinearBlock(\n output_size=output_size,\n w_init=w_init,\n b_init=b_init,\n with_bias=with_bias,\n batch_norm=batch_norm,\n reparametrized_linear=reparametrized_linear,\n name=name,\n )\n )\n self.layers = tuple(layers)\n self.output_size = output_sizes[-1] if output_sizes else None\n\n def __call__(\n self,\n inputs: jnp.ndarray,\n is_training: bool,\n gain: Optional[jnp.ndarray] = None,\n shift: Optional[jnp.ndarray] = None,\n skip_readout: Optional[bool] = False,\n dropout_rate: Optional[float] = None,\n ) -> jnp.ndarray:\n \"\"\"\n Multilayer perceptron with optional gain and shift modulation and skipping of readout layer.\n Args:\n inputs: A Tensor of shape ``[batch_size, input_size]``.\n gain: An optional list of Tensors of length ``num_layers``\n and of shapes ``hidden_dims + [output_dim]``\n shift: An optional list of Tensors of length ``num_layers``\n and of shapes ``hidden_dims + [output_dim]``\n skip_readout: An optional bool indicating whether to skip last readout layer\n dropout_rate: Optional dropout rate.\n rng: Optional RNG key. Require when using dropout.\n\n Returns:\n The output of the model of size ``[batch_size, output_size]``.\n \"\"\"\n num_layers = len(self.layers)\n\n out = hk.Flatten(preserve_dims=1)(inputs)\n\n for i, layer in enumerate(self.layers):\n if i < (num_layers - 1) or not skip_readout:\n out = layer(out, is_training)\n if gain is not None:\n out = out * gain[i]\n if shift is not None:\n out = out + shift[i]\n if i < (num_layers - 1) or self.activate_final:\n # Only perform dropout if we are activating the output.\n if dropout_rate is not None and is_training:\n out = hk.dropout(hk.next_rng_key(), dropout_rate, out)\n out = self.activation(out)\n\n return out" }, { "identifier": "PytreeReshaper", "path": "metax/utils/pytree.py", "snippet": "class PytreeReshaper:\n def __init__(self, tree_shapes):\n self.shapes, self.treedef = jtu.tree_flatten(\n tree_shapes, is_leaf=is_tuple_of_ints\n )\n sizes = [math.prod(shape) for shape in self.shapes]\n\n self.split_indeces = list(np.cumsum(sizes)[:-1])\n self.num_elements = sum(sizes)\n\n def __call__(self, array_flat):\n arrays_split = jnp.split(array_flat, self.split_indeces)\n arrays_reshaped = [a.reshape(shape) for a, shape in zip(arrays_split, self.shapes)]\n\n return jtu.tree_unflatten(self.treedef, arrays_reshaped)\n\n @staticmethod\n def flatten(pytree):\n return jnp.concatenate([jnp.ravel(e) for e in jtu.tree_flatten(pytree)[0]])" } ]
import itertools import haiku as hk import jax import jax.numpy as jnp import jax.tree_util as jtu import numpy as np from functools import partial from metax.data.base import MultitaskDataset from metax.data.dataset.base import DatasetGenerator from metax.models.mlp import MultilayerPerceptron from metax.utils import PytreeReshaper
1,893
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """
class HyperTeacher(DatasetGenerator):
1
2023-12-22 16:35:49+00:00
4k
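PytreeReshaper in this record maps a pytree of arrays to and from a single flat vector by caching the leaf shapes, the split indices, and the treedef. A usage-style sketch of the same round trip, assuming only jax and numpy:

# Flatten a pytree of parameters into one vector and rebuild it from
# cached shape metadata, mirroring PytreeReshaper above.
import math
import jax.numpy as jnp
import jax.tree_util as jtu
import numpy as np

params = {"b": jnp.zeros(3), "w": jnp.ones((2, 3))}

# Cache shape metadata once (cf. PytreeReshaper.__init__).
shape_tree = jtu.tree_map(lambda a: a.shape, params)
shapes, treedef = jtu.tree_flatten(shape_tree, is_leaf=lambda x: isinstance(x, tuple))
split_at = list(np.cumsum([math.prod(s) for s in shapes])[:-1])

# Flatten, then rebuild (cf. PytreeReshaper.flatten / __call__).
flat = jnp.concatenate([jnp.ravel(leaf) for leaf in jtu.tree_leaves(params)])
pieces = jnp.split(flat, split_at)
rebuilt = jtu.tree_unflatten(treedef, [p.reshape(s) for p, s in zip(pieces, shapes)])

assert rebuilt["w"].shape == (2, 3) and rebuilt["b"].shape == (3,)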
kyegomez/qformer
qformer/model.py
[ { "identifier": "ImgBlock", "path": "qformer/blocks.py", "snippet": "class ImgBlock(nn.Module):\n \"\"\"\n ImgBlock is a module that performs multi-query attention, cross-attention, and feedforward operations on input tensors.\n\n Args:\n dim (int): The dimension of the input tensors.\n depth (int): The number of times the operations are applied.\n heads (int): The number of attention heads.\n dropout (float, optional): The dropout probability. Defaults to 0.1.\n emb_dropout (float, optional): The embedding dropout probability. Defaults to 0.1.\n\n Attributes:\n dim (int): The dimension of the input tensors.\n depth (int): The number of times the operations are applied.\n heads (int): The number of attention heads.\n dropout (float): The dropout probability.\n emb_dropout (float): The embedding dropout probability.\n attn (MultiQueryAttention): The multi-query attention module.\n cross_attn (CrossAttention): The cross-attention module.\n feedforward (SimpleFeedForward): The feedforward module.\n\n Methods:\n forward(x: Tensor, img: Tensor) -> Tensor:\n Performs the forward pass of the ImgBlock module.\n\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n depth: int,\n heads: int,\n dropout: float = 0.1,\n *args,\n **kwargs,\n ):\n super(ImgBlock, self).__init__(*args, **kwargs)\n self.dim = dim\n self.depth = depth\n self.heads = heads\n self.dropout = dropout\n self.attn = MultiQueryAttention(dim, heads)\n self.cross_attn = CrossAttention(\n dim=dim,\n heads=heads,\n dropout=dropout,\n )\n self.feedforward = SimpleFeedForward(dim, dim * 4, dropout)\n\n # Create a list of layers\n self.self_attn_layers = nn.ModuleList([])\n self.cross_attn_layers = nn.ModuleList([])\n self.ffn_layers = nn.ModuleList([])\n\n # Add the attn, cross attention, simple feedforward layers to the list\n for _ in range(depth):\n # Add the multi query attention layer\n self.self_attn_layers.append(\n MultiQueryAttention(dim, heads)\n )\n # Add the cross attention layer\n self.cross_attn_layers.append(\n CrossAttention(dim=dim, heads=heads, dropout=dropout)\n )\n # Add the simple feedforward layer\n self.ffn_layers.append(\n SimpleFeedForward(dim, dim * 4, dropout)\n )\n\n def forward(self, x: Tensor, img: Tensor) -> Tensor:\n \"\"\"\n Performs the forward pass of the ImgBlock module.\n\n Args:\n x (Tensor): The input tensor.\n img (Tensor): The image tensor.\n\n Returns:\n Tensor: The output tensor after applying multi-query attention, cross-attention, and feedforward operations.\n\n \"\"\"\n for self_attn, cross_attn, ffn in zip(\n self.self_attn_layers,\n self.cross_attn_layers,\n self.ffn_layers,\n ):\n x, _, _ = self_attn(x)\n x = cross_attn(x, img)\n x = ffn(x)\n\n return x" }, { "identifier": "TextBlock", "path": "qformer/blocks.py", "snippet": "class TextBlock(nn.Module):\n \"\"\"\n TextBlock module that performs self-attention and feedforward operations.\n\n Args:\n dim (int): The dimension of the input and output tensors.\n heads (int): The number of attention heads.\n depth (int): The number of layers in the module.\n dropout (float, optional): The dropout probability. 
Defaults to 0.1.\n\n Attributes:\n dim (int): The dimension of the input and output tensors.\n heads (int): The number of attention heads.\n depth (int): The number of layers in the module.\n dropout (float): The dropout probability.\n attn (MultiQueryAttention): The self-attention module.\n feedforward (SimpleFeedForward): The feedforward module.\n layers (nn.ModuleList): The list of layers in the module.\n\n Methods:\n forward(x: Tensor) -> Tensor:\n Performs the forward pass of the TextBlock module.\n\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n heads: int,\n depth: int,\n dropout: float = 0.1,\n *args,\n **kwargs,\n ):\n super().__init__()\n self.dim = dim\n self.heads = heads\n self.depth = depth\n self.dropout = dropout\n\n self.attn = MultiQueryAttention(dim, heads)\n self.feedforward = SimpleFeedForward(dim, dim * 4, dropout)\n self.layers = nn.ModuleList([])\n self.ffn_layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(MultiQueryAttention(dim, heads))\n\n self.ffn_layers.append(\n SimpleFeedForward(dim, dim * 4, dropout)\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"\n Performs the forward pass of the TextBlock module.\n\n Args:\n x (Tensor): The input tensor.\n\n Returns:\n Tensor: The output tensor after self-attention and feedforward operations.\n\n \"\"\"\n for attn, ffn in zip(self.layers, self.ffn_layers):\n x, _, _ = attn(x)\n x = ffn(x)\n return x" }, { "identifier": "mask_top_right_quadrant", "path": "qformer/masking.py", "snippet": "def mask_top_right_quadrant(tensor):\n \"\"\"\n Masks the top right quadrant of a tensor.\n\n Args:\n tensor (Tensor): The input tensor.\n\n Returns:\n Tensor: The masked tensor.\n \"\"\"\n rows, cols = tensor.shape[-2:]\n mask = torch.ones(rows, cols)\n mask[: rows // 2, cols // 2 :] = 0\n return tensor * mask" } ]
from torch import Tensor, nn from qformer.blocks import ImgBlock, TextBlock from qformer.masking import mask_top_right_quadrant
1,891
class QFormer(nn.Module): """ QFormer is a transformer-based model for processing text and image inputs. Args: dim (int): The dimension of the model. heads (int): The number of attention heads. depth (int): The depth of the model. dropout (float, optional): The dropout rate. Defaults to 0.1. text_block_depth (int, optional): The depth of the text block. Defaults to None. img_text_block_depth (int, optional): The depth of the image text block. Defaults to None. Attributes: dim (int): The dimension of the model. heads (int): The number of attention heads. depth (int): The depth of the model. dropout (float): The dropout rate. img_block (ImgBlock): The image block of the model. text_block (TextBlock): The text block of the model. img_layers (nn.ModuleList): The list of image layers. text_layers (nn.ModuleList): The list of text layers. Examples: >>> model = QFormer(dim=512, heads=8, depth=6, dropout=0.1, text_block_depth=2, img_text_block_depth=2) >>> x = torch.randn(1, 10, 512) >>> img = torch.randn(1, 3, 224, 224) >>> out = model(x, img) >>> out.shape torch.Size([1, 10, 512]) """ def __init__( self, dim: int, heads: int, depth: int, dropout: float = 0.1, text_block_depth: int = None, img_text_block_depth: int = None, *args, **kwargs, ): super().__init__() self.dim = dim self.heads = heads self.depth = depth self.dropout = dropout
class QFormer(nn.Module): """ QFormer is a transformer-based model for processing text and image inputs. Args: dim (int): The dimension of the model. heads (int): The number of attention heads. depth (int): The depth of the model. dropout (float, optional): The dropout rate. Defaults to 0.1. text_block_depth (int, optional): The depth of the text block. Defaults to None. img_text_block_depth (int, optional): The depth of the image text block. Defaults to None. Attributes: dim (int): The dimension of the model. heads (int): The number of attention heads. depth (int): The depth of the model. dropout (float): The dropout rate. img_block (ImgBlock): The image block of the model. text_block (TextBlock): The text block of the model. img_layers (nn.ModuleList): The list of image layers. text_layers (nn.ModuleList): The list of text layers. Examples: >>> model = QFormer(dim=512, heads=8, depth=6, dropout=0.1, text_block_depth=2, img_text_block_depth=2) >>> x = torch.randn(1, 10, 512) >>> img = torch.randn(1, 3, 224, 224) >>> out = model(x, img) >>> out.shape torch.Size([1, 10, 512]) """ def __init__( self, dim: int, heads: int, depth: int, dropout: float = 0.1, text_block_depth: int = None, img_text_block_depth: int = None, *args, **kwargs, ): super().__init__() self.dim = dim self.heads = heads self.depth = depth self.dropout = dropout
self.img_block = ImgBlock(dim, depth, heads, dropout)
0
2023-12-29 03:55:46+00:00
4k
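mask_top_right_quadrant above is purely mechanical: build a ones matrix over the last two dimensions, zero its top-right block, and multiply, so the mask broadcasts over any leading batch dimensions. A small torch check of that behaviour (the semantic role the quadrant plays in QFormer's attention is not spelled out in this record):

import torch

def mask_top_right_quadrant(tensor: torch.Tensor) -> torch.Tensor:
    rows, cols = tensor.shape[-2:]
    mask = torch.ones(rows, cols)
    mask[: rows // 2, cols // 2 :] = 0  # zero the top-right block
    return tensor * mask

scores = torch.arange(16.0).reshape(4, 4)
masked = mask_top_right_quadrant(scores)
assert torch.all(masked[:2, 2:] == 0)       # top-right 2x2 zeroed
assert torch.equal(masked[2:], scores[2:])  # bottom half untouched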
willfinnigan/RetroBioCat_2
rbc2/reaction_network_entities/network.py
[ { "identifier": "get_pa_route", "path": "rbc2/pathway_tools/pa_route_conversion.py", "snippet": "def get_pa_route(smi: str,\n starting_material_evaluator: StartingMaterialEvaluatorInterface,\n get_smi_produced_by: Callable[[str], List[Reaction]]) -> dict:\n \"\"\"\n Recursive function which will generate a 'pa_route' - format commonly used by AIZynthfinder associated tools.\n \"\"\"\n\n in_stock = bool(starting_material_evaluator.eval(smi)[0])\n tree = {'type': 'mol', 'smiles': smi, 'children': [], 'in_stock': in_stock}\n for reaction in get_smi_produced_by(smi):\n rxn_branch = {'type': 'reaction', 'smiles': reaction.reaction_smiles(), 'children': []}\n for child_smi in reaction.substrates:\n childs_pa_route = get_pa_route(child_smi, starting_material_evaluator, get_smi_produced_by)\n rxn_branch['children'].append(childs_pa_route)\n\n if len(rxn_branch['children']) == 0: # ensure no empty children lists\n rxn_branch.pop('children')\n tree['children'].append(rxn_branch)\n\n if len(tree['children']) == 0: # ensure no empty children lists\n tree.pop('children')\n\n return tree" }, { "identifier": "StartingMaterialEvaluatorInterface", "path": "rbc2/reaction_evaluation/starting_material_evaluator/starting_material_evaluator_interface.py", "snippet": "class StartingMaterialEvaluatorInterface(ABC):\n\n def __init__(self, config: SourceMol_Config):\n self.config = config\n\n @abstractmethod\n def eval(self, smi: str) -> Tuple[bool, dict]:\n pass\n\n @abstractmethod\n def is_mol_chiral(self, smi: str) -> bool:\n pass" }, { "identifier": "reactions_to_dicts", "path": "rbc2/reaction_network_entities/reaction.py", "snippet": "def reactions_to_dicts(reactions: List[Reaction]) -> List[dict]:\n \"\"\" Converts a list of reactions to a list of dictionaries, such as would be returned by asdict(Reaction) \"\"\"\n\n rxn_dicts = [r.to_dict() for r in reactions]\n\n # drop None values\n rxn_dicts = [{k: v for k, v in rxn_dict.items() if v is not None} for rxn_dict in rxn_dicts]\n\n return rxn_dicts" }, { "identifier": "reaction_from_dict", "path": "rbc2/reaction_network_entities/reaction.py", "snippet": "def reaction_from_dict(reaction_dict) -> Reaction:\n \"\"\" Loads a reaction from a dictionary, such as would be returned by asdict(Reaction) \"\"\"\n\n return Reaction(product=reaction_dict['product'],\n substrates=reaction_dict['substrates'],\n name=reaction_dict['name'],\n rxn_type=reaction_dict['rxn_type'],\n rxn_domain=reaction_dict['rxn_domain'],\n unique_id=reaction_dict['unique_id'],\n\n # these are optional, will just be empty/0 if not present\n score=reaction_dict.get('score', 0),\n precedents=[Precedent(**precedent_dict) for precedent_dict in reaction_dict.get('precedents', [])],\n template_metadata=reaction_dict.get('template_metadata', {}),\n feasability_filter_scores=reaction_dict.get('feasability_filter_scores', {}),\n complexity_change=reaction_dict.get('complexity_change', None))" } ]
from collections import defaultdict from typing import List, Set, Sequence from typing import TYPE_CHECKING from rbc2.pathway_tools.pa_route_conversion import get_pa_route from rbc2.reaction_evaluation.starting_material_evaluator.starting_material_evaluator_interface import \ StartingMaterialEvaluatorInterface from rbc2.reaction_network_entities.reaction import reactions_to_dicts, reaction_from_dict from rbc2.reaction_network_entities.reaction import Reaction from rbc2.reaction_network_entities.reaction_option import ReactionOption from rbc2.expansion.default_expander_interface import Expander
1,822
from __future__ import annotations if TYPE_CHECKING: ReactionID = str OptionID = str ExpanderID = str Smi = str RxnType = str class Network(): """ Network is used to keep a record of the outcome of all expansions.""" def __init__(self, reactions: Sequence[Reaction] = ()): self.smi_produced_by: dict[Smi: Set[Reaction]] = defaultdict(set) self.smi_substrate_of: dict[Smi: Set[Reaction]] = defaultdict(set) self.reaction_options: dict[Smi: dict[ExpanderID: List[ReactionOption]]] = defaultdict(lambda: defaultdict(dict)) self.reactions: Set[Reaction] = set() if len(reactions) != 0: for rxn in reactions: self.add_reaction(rxn) def add_reaction(self, reaction: Reaction): self.reactions.add(reaction) self.smi_produced_by[reaction.product].add(reaction) for smi in reaction.substrates: self.smi_substrate_of[smi].add(reaction) def remove_reaction(self, reaction: Reaction): self.reactions.discard(reaction) self.smi_produced_by[reaction.product].discard(reaction) for smi in reaction.substrates: self.smi_substrate_of[smi].discard(reaction) def add_option(self, option: ReactionOption): self.reaction_options[option.target_smi][option.rxn_type][option.unique_id] = option def bulk_add_options(self, smi: Smi, rxn_type: RxnType, list_options: List[ReactionOption]): self.reaction_options[smi][rxn_type] = {option.unique_id: option for option in list_options} def remove_option(self, option: ReactionOption): self.reaction_options[option.target_smi][option.rxn_type].pop(option.unique_id, None) def get_reaction_options(self, smi: Smi, rxn_type: RxnType) -> list[ReactionOption]: options_for_smi = self.reaction_options.get(smi, {}) options_for_rxn_type = options_for_smi.get(rxn_type, {}) return list(options_for_rxn_type.values()) def are_options_available(self, smi: Smi, rxn_type: RxnType) -> bool: return self.reaction_options.get(smi, {}).get(rxn_type, False) is not False def get_reactions_which_molecule_is_produced_by(self, smi: Smi) -> Set[Reaction]: return self.smi_produced_by.get(smi, set()) def get_reactions_which_molecule_is_substrate_of(self, smi: Smi) -> Set[Reaction]: return self.smi_substrate_of.get(smi, set()) def all_smis(self) -> Set[Smi]: all_smis = set(self.smi_produced_by.keys()) all_smis.update(set(self.smi_substrate_of.keys())) return all_smis def all_reactions(self) -> List[Reaction]: return list(self.reactions) def all_reaction_options(self) -> List[ReactionOption]: all_options = [] for smi, rxn_type_options in self.reaction_options.items(): for rxn_type, options_dict in rxn_type_options.items(): for option_id, option in options_dict.items(): all_options.append(option) return all_options def save(self): """Save the network to a dict""" data = {"reactions": reactions_to_dicts(self.all_reactions()), "reaction_options": [option_to_dict(opt) for opt in self.all_reaction_options()]} return data def load(self, data: dict, expanders: List[Expander]): """ Load the network from data dict ReactionOptions will only be loaded if the relevant expander is provided """ # check each expander is associated with this network for expander in expanders: if expander.network != self: raise Exception("Can not load reaction options when expander is not associated with the same network") # load reactions reaction_unique_id_dict = {} for reaction_dict in data['reactions']:
from __future__ import annotations if TYPE_CHECKING: ReactionID = str OptionID = str ExpanderID = str Smi = str RxnType = str class Network(): """ Network is used to keep a record of the outcome of all expansions.""" def __init__(self, reactions: Sequence[Reaction] = ()): self.smi_produced_by: dict[Smi: Set[Reaction]] = defaultdict(set) self.smi_substrate_of: dict[Smi: Set[Reaction]] = defaultdict(set) self.reaction_options: dict[Smi: dict[ExpanderID: List[ReactionOption]]] = defaultdict(lambda: defaultdict(dict)) self.reactions: Set[Reaction] = set() if len(reactions) != 0: for rxn in reactions: self.add_reaction(rxn) def add_reaction(self, reaction: Reaction): self.reactions.add(reaction) self.smi_produced_by[reaction.product].add(reaction) for smi in reaction.substrates: self.smi_substrate_of[smi].add(reaction) def remove_reaction(self, reaction: Reaction): self.reactions.discard(reaction) self.smi_produced_by[reaction.product].discard(reaction) for smi in reaction.substrates: self.smi_substrate_of[smi].discard(reaction) def add_option(self, option: ReactionOption): self.reaction_options[option.target_smi][option.rxn_type][option.unique_id] = option def bulk_add_options(self, smi: Smi, rxn_type: RxnType, list_options: List[ReactionOption]): self.reaction_options[smi][rxn_type] = {option.unique_id: option for option in list_options} def remove_option(self, option: ReactionOption): self.reaction_options[option.target_smi][option.rxn_type].pop(option.unique_id, None) def get_reaction_options(self, smi: Smi, rxn_type: RxnType) -> list[ReactionOption]: options_for_smi = self.reaction_options.get(smi, {}) options_for_rxn_type = options_for_smi.get(rxn_type, {}) return list(options_for_rxn_type.values()) def are_options_available(self, smi: Smi, rxn_type: RxnType) -> bool: return self.reaction_options.get(smi, {}).get(rxn_type, False) is not False def get_reactions_which_molecule_is_produced_by(self, smi: Smi) -> Set[Reaction]: return self.smi_produced_by.get(smi, set()) def get_reactions_which_molecule_is_substrate_of(self, smi: Smi) -> Set[Reaction]: return self.smi_substrate_of.get(smi, set()) def all_smis(self) -> Set[Smi]: all_smis = set(self.smi_produced_by.keys()) all_smis.update(set(self.smi_substrate_of.keys())) return all_smis def all_reactions(self) -> List[Reaction]: return list(self.reactions) def all_reaction_options(self) -> List[ReactionOption]: all_options = [] for smi, rxn_type_options in self.reaction_options.items(): for rxn_type, options_dict in rxn_type_options.items(): for option_id, option in options_dict.items(): all_options.append(option) return all_options def save(self): """Save the network to a dict""" data = {"reactions": reactions_to_dicts(self.all_reactions()), "reaction_options": [option_to_dict(opt) for opt in self.all_reaction_options()]} return data def load(self, data: dict, expanders: List[Expander]): """ Load the network from data dict ReactionOptions will only be loaded if the relevant expander is provided """ # check each expander is associated with this network for expander in expanders: if expander.network != self: raise Exception("Can not load reaction options when expander is not associated with the same network") # load reactions reaction_unique_id_dict = {} for reaction_dict in data['reactions']:
reaction = reaction_from_dict(reaction_dict)
3
2023-12-30 11:33:41+00:00
4k
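The Network class above is essentially two inverted indices kept in sync: smi_produced_by maps a SMILES string to the reactions that make it, and smi_substrate_of maps it to the reactions that consume it. A stripped-down sketch of that bookkeeping; the Reaction stub here is hypothetical (the real rbc2 class also carries ids, scores, and precedents):

from collections import defaultdict
from dataclasses import dataclass

@dataclass(frozen=True)
class Reaction:  # hypothetical stub, not the rbc2 Reaction
    product: str
    substrates: tuple

smi_produced_by: dict = defaultdict(set)
smi_substrate_of: dict = defaultdict(set)

def add_reaction(rxn: Reaction) -> None:
    smi_produced_by[rxn.product].add(rxn)
    for smi in rxn.substrates:
        smi_substrate_of[smi].add(rxn)

rxn = Reaction(product="CCO", substrates=("C=C", "O"))
add_reaction(rxn)
assert rxn in smi_produced_by["CCO"]
assert rxn in smi_substrate_of["O"]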
DomingoJoseCab/AutoTube
utils/gpt/chatgpt.py
[ { "identifier": "get_product_script", "path": "utils/gpt/generate_script.py", "snippet": "def get_product_script(data_extracted, video_name, i):\r\n\r\n prompt = get_product_replace(data_extracted, video_name, i)\r\n \r\n messages = [\r\n {\"role\": \"system\", \"content\": \"Genera un guion en ESPAÑOL para un narrador de voz en off para un video de YouTube, el video habla de varios productos de Amazon, PERO en este caso deberas generar el guion para un ÚNICO PRODUCTO. El guion del ÚNICO producto debe contener exclusivamente el texto del narrador, solo el texto del narrador sin comillas. Concéntrate en las características únicas, el precio y los beneficios del producto. El guion debe adherirse al sistema de medidas habitual utilizado en España. La narración debe ser coherente y continua para un único capítulo del video, estrictamente limitada a lo que dirá el narrador. Excluye cualquier descripción visual, notas de transición o interacciones con la audiencia. El objetivo es crear una narración informativa y atractiva, adecuada para un video profesional de YouTube.\"},\r\n {\"role\": \"user\", \"content\": prompt}\r\n ]\r\n\r\n respuesta = CLIENT.chat.completions.create(\r\n model=MODELO_GPT4,\r\n messages=messages,\r\n max_tokens=500\r\n )\r\n n = 6-i\r\n product_save_path = os.path.join(FOLDER_PATH, \"1. Scripts\", f\"{n}. product_{n}.txt\")\r\n return guardar_respuesta(respuesta, product_save_path)\r" }, { "identifier": "get_better_intro", "path": "utils/gpt/generate_script.py", "snippet": "def get_better_intro(path_intro,name):\r\n prompt = get_intro_replace(path_intro,name)\r\n \r\n messages = [\r\n {\"role\": \"system\", \"content\": \"Dada una introducción base únicamente quiero que adecues correctamente el genero de los artículos. Únicamente devuelve la introducción en CASTELLANO, nada más. Solo la introducción.\"},\r\n {\"role\": \"user\", \"content\": prompt}\r\n ]\r\n \r\n\r\n respuesta = CLIENT.chat.completions.create(\r\n model=MODELO_GPT4,\r\n messages=messages,\r\n max_tokens=150\r\n )\r\n intro_save_path = os.path.join(FOLDER_PATH, \"1. Scripts\", \"0. intro.txt\")\r\n return guardar_respuesta(respuesta,intro_save_path)\r" }, { "identifier": "get_better_outro", "path": "utils/gpt/generate_script.py", "snippet": "def get_better_outro(path_outro, name):\r\n prompt = get_intro_replace(path_outro,name)\r\n\r\n messages = [\r\n {\"role\": \"system\", \"content\": \"Dada una despedida base únicamente quiero que adecues correctamente el genero de los artículos. Únicamente devuelve la despedida en CASTELLANO, nada más. Solo la despedida.\"},\r\n {\"role\": \"user\", \"content\": prompt}\r\n ]\r\n \r\n\r\n respuesta = CLIENT.chat.completions.create(\r\n model=MODELO_GPT4,\r\n messages=messages,\r\n max_tokens=150\r\n )\r\n\r\n outro_save_path = os.path.join(FOLDER_PATH, \"1. Scripts\", \"7. 
outro.txt\")\r\n return guardar_respuesta(respuesta,outro_save_path)" }, { "identifier": "set_up_generate_script", "path": "utils/gpt/generate_script.py", "snippet": "def set_up_generate_script(client, modelo, folder_path):\r\n global CLIENT, MODELO_GPT4, FOLDER_PATH\r\n CLIENT = client\r\n MODELO_GPT4 = modelo\r\n FOLDER_PATH = folder_path\r" }, { "identifier": "get_description", "path": "utils/gpt/generate_description.py", "snippet": "def get_description(description_path, data):\r\n prompt_replace = get_prompt_replaced(description_path, data)\r\n \r\n messages = [\r\n {\"role\": \"system\", \"content\": \"Dada una descripcción quiero que dejes bien escrito todo, asegúrate de que del genero y la coherencia gramátical. Únicamente quiero que me devuelvas la descripción, nada más. Es decir, yo te paso la descripción, la revisas y me la devuelves sin añadir ningún mensaje más, SOLO LA DESCRIPCIÓN.\"},\r\n {\"role\": \"user\", \"content\": prompt_replace}\r\n ]\r\n \r\n\r\n respuesta = CLIENT.chat.completions.create(\r\n model=MODELO_GPT4,\r\n messages=messages,\r\n max_tokens=300\r\n )\r\n \r\n description_save_path = os.path.join(FOLDER_PATH, \"1. Scripts\", \"description.txt\")\r\n return guardar_respuesta(respuesta, description_save_path)" }, { "identifier": "set_up_generate_description", "path": "utils/gpt/generate_description.py", "snippet": "def set_up_generate_description(client, modelo, folder_path):\r\n global CLIENT, MODELO_GPT4, FOLDER_PATH\r\n CLIENT = client\r\n MODELO_GPT4 = modelo\r\n FOLDER_PATH = folder_path\r" }, { "identifier": "get_miniature", "path": "utils/gpt/generate_miniature.py", "snippet": "def get_miniature(prompt, name):\r\n\r\n prompt_img = get_prompt_miniature(prompt, name)\r\n\r\n response = CLIENT.images.generate(\r\n model=MODELO_IMG,\r\n prompt=prompt_img,\r\n size=\"1792x1024\",\r\n quality=\"standard\",\r\n n=1,\r\n )\r\n\r\n image_url = response.data[0].url\r\n\r\n img_save_path = os.path.join(FOLDER_PATH, \"3. Miniatures\", \"miniatura.png\")\r\n guardar_imagen(image_url, img_save_path)" }, { "identifier": "set_up_generate_miniature", "path": "utils/gpt/generate_miniature.py", "snippet": "def set_up_generate_miniature(client, modelo, folder_path):\r\n global CLIENT, MODELO_IMG, FOLDER_PATH\r\n CLIENT = client\r\n MODELO_IMG = modelo\r\n FOLDER_PATH = folder_path\r" } ]
from openai import OpenAI from utils.gpt.generate_script import get_product_script, get_better_intro, get_better_outro, set_up_generate_script from utils.gpt.generate_description import get_description, set_up_generate_description from utils.gpt.generate_miniature import get_miniature, set_up_generate_miniature import os import json
1,786
# ============================================================================== # AutoTube Script # Creado por: Domingo Caballero # Canal de YouTube: https://www.youtube.com/@emprendedomingo?=sub_confirmation=1 # Lista de Correo: https://emprendecondomingo.substack.com/ # ============================================================================== with open('../AutoTube/argss.json', 'r', encoding='utf-8') as archivo: datos = json.load(archivo) OPENAI_API_KEY = datos['OPENAI_API_KEY'] CLIENT = OpenAI(api_key=OPENAI_API_KEY) MODELO_GPT4 = "gpt-4" MODELO_IMG = "dall-e-3" def set_up(folder_path): set_up_generate_script(CLIENT, MODELO_GPT4, folder_path) set_up_generate_description(CLIENT, MODELO_GPT4, folder_path) set_up_generate_miniature(CLIENT, MODELO_IMG, folder_path) def chatgpt(data, folder_path): set_up(folder_path) list_asins = ['ASIN_TOP5','ASIN_TOP4','ASIN_TOP3','ASIN_TOP2','ASIN_TOP1'] #################### GENERATING INTRO #################### print("Generating intro...") intro_path = os.path.join(datos['scripts_path'],'intro.txt') get_better_intro(intro_path, datos['video_name']) print("Intro generated.") print("-----------------------------") #################### GENERATING PRODUCTS #################### for i, product in enumerate(list_asins): print(f"Generating product {5-i}...") get_product_script(data[product],datos['video_name'],5-i) print("Products generated.") print("-----------------------------") #################### GENERATING OUTRO #################### print("Generating outro...") outro_path = os.path.join(datos['scripts_path'],'outro.txt')
# ============================================================================== # AutoTube Script # Creado por: Domingo Caballero # Canal de YouTube: https://www.youtube.com/@emprendedomingo?=sub_confirmation=1 # Lista de Correo: https://emprendecondomingo.substack.com/ # ============================================================================== with open('../AutoTube/argss.json', 'r', encoding='utf-8') as archivo: datos = json.load(archivo) OPENAI_API_KEY = datos['OPENAI_API_KEY'] CLIENT = OpenAI(api_key=OPENAI_API_KEY) MODELO_GPT4 = "gpt-4" MODELO_IMG = "dall-e-3" def set_up(folder_path): set_up_generate_script(CLIENT, MODELO_GPT4, folder_path) set_up_generate_description(CLIENT, MODELO_GPT4, folder_path) set_up_generate_miniature(CLIENT, MODELO_IMG, folder_path) def chatgpt(data, folder_path): set_up(folder_path) list_asins = ['ASIN_TOP5','ASIN_TOP4','ASIN_TOP3','ASIN_TOP2','ASIN_TOP1'] #################### GENERATING INTRO #################### print("Generating intro...") intro_path = os.path.join(datos['scripts_path'],'intro.txt') get_better_intro(intro_path, datos['video_name']) print("Intro generated.") print("-----------------------------") #################### GENERATING PRODUCTS #################### for i, product in enumerate(list_asins): print(f"Generating product {5-i}...") get_product_script(data[product],datos['video_name'],5-i) print("Products generated.") print("-----------------------------") #################### GENERATING OUTRO #################### print("Generating outro...") outro_path = os.path.join(datos['scripts_path'],'outro.txt')
get_better_outro(outro_path, datos['video_name'])
2
2023-12-28 16:15:37+00:00
4k
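Every helper in this record follows the same openai>=1.0 call shape: build a system/user message pair, call client.chat.completions.create, and persist the first choice's text (the guardar_respuesta helper that does the saving is not shown here). A generic sketch of that pattern, with an illustrative prompt:

# Sketch of the chat-completion call pattern used throughout this record.
import os
from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def generate_section(system_prompt: str, user_prompt: str, max_tokens: int = 500) -> str:
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=max_tokens,
    )
    return response.choices[0].message.content  # the record writes this to a .txt file

# e.g. generate_section("You write voice-over scripts.", "Describe product X.")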
gregorybchris/typogenetics
typogenetics/search.py
[ { "identifier": "Base", "path": "typogenetics/typogenetics.py", "snippet": "class Base(StrEnum):\n C = auto()\n G = auto()\n T = auto()\n A = auto()\n\n @classmethod\n def from_str(cls, base_str: str) -> \"Base\":\n return {\n \"C\": cls.C,\n \"G\": cls.G,\n \"T\": cls.T,\n \"A\": cls.A,\n }[base_str]\n\n def __repr__(self) -> str:\n return self.value.upper()\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def is_type(self, base_type: BaseType) -> bool:\n if base_type == BaseType.PURINE:\n return self.is_purine()\n return self.is_pyrimidine()\n\n def is_purine(self) -> bool:\n return self in [Base.A, Base.G]\n\n def is_pyrimidine(self) -> bool:\n return self in [Base.C, Base.T]\n\n def get_complement(self) -> \"Base\":\n return {\n Base.C: Base.G,\n Base.G: Base.C,\n Base.T: Base.A,\n Base.A: Base.T,\n }[self]" }, { "identifier": "Rewriter", "path": "typogenetics/typogenetics.py", "snippet": "class Rewriter:\n \"\"\"\n | ins | action |\n | --- | ---------------------------------------------- |\n | cut | cut strand(s) |\n | del | delete a base from strand |\n | swi | switch enzyme to other strand |\n | mvr | move one unit to the right |\n | mvl | move one unit to the left |\n | cop | turn on Copy mode |\n | off | turn off Copy mode |\n | ina | insert A to the right of this unit |\n | inc | insert C to the right of this unit |\n | ing | insert G to the right of this unit |\n | int | insert T to the right of this unit |\n | rpy | search for the nearest pyrimidine to the right |\n | rpu | search for the nearest purine to the right |\n | lpy | search for the nearest pyrimidine to the left |\n | lpu | search for the nearest purine to the left |\n \"\"\"\n\n # pylint: disable=too-many-branches\n @classmethod\n def rewrite(cls, enzyme: Enzyme, strand: Strand) -> List[Strand]:\n copy_mode = False\n\n unit = Folder.get_binding_site(enzyme, strand)\n logger.debug(f\"Rewriting strand {strand} with enzyme {enzyme}, unit={unit}\")\n if unit is None:\n return [strand]\n\n pairs = [BasePair(base, None) for base in strand.iter_bases()]\n\n logger.debug(f\"Init @ {unit}, copy={copy_mode}\")\n logger.debug(cls.pairs_to_string(pairs))\n\n strands = []\n for amino_acid in enzyme.iter_amino_acids():\n logger.debug(f\"Applying {amino_acid} @ {unit}, copy={copy_mode}\")\n\n if amino_acid == AminoAcid.CUT:\n cut_pairs = pairs[unit + 1 :]\n strands += cls.strands_from_pairs(cut_pairs)\n pairs = pairs[: unit + 1]\n elif amino_acid == AminoAcid.DEL:\n pairs[unit].bind = None\n unit -= 1\n # NOTE: It's not clear from the specification which direction we should move\n # after a deletion, we here we choose left arbitrarily.\n if unit < 0:\n logger.debug(\"Reached end of strand\")\n break\n if pairs[unit].bind is None:\n logger.debug(\"Reached end of strand\")\n break\n elif amino_acid == AminoAcid.SWI:\n if pairs[unit].comp is None:\n logger.debug(\"Tried to switch to empty base pair complement\")\n break\n for pair in pairs:\n pair.swap()\n pairs = pairs[::-1]\n unit = len(pairs) - unit - 1\n elif amino_acid in [AminoAcid.MVR, AminoAcid.MVL]:\n unit += cls.amino_acid_to_direction(amino_acid)\n if unit < 0 or unit >= len(pairs):\n logger.debug(\"Reached end of strand\")\n break\n if pairs[unit].bind is None:\n logger.debug(\"Reached end of strand\")\n break\n if copy_mode:\n pairs[unit].add_comp()\n elif amino_acid == AminoAcid.COP:\n copy_mode = True\n pair = pairs[unit]\n assert pair.bind is not None\n pair.comp = pair.bind.get_complement()\n elif amino_acid == AminoAcid.OFF:\n copy_mode = False\n elif 
amino_acid in [AminoAcid.INA, AminoAcid.INC, AminoAcid.ING, AminoAcid.INT]:\n bind = cls.amino_acid_to_base(amino_acid)\n comp = bind.get_complement() if copy_mode else None\n pairs.insert(unit + 1, BasePair(bind, comp))\n elif amino_acid in [AminoAcid.RPY, AminoAcid.RPU, AminoAcid.LPY, AminoAcid.LPU]:\n end_of_strand = False\n while True:\n unit += cls.amino_acid_to_direction(amino_acid)\n if unit < 0 or unit >= len(pairs):\n end_of_strand = True\n break\n pair = pairs[unit]\n bind_base = pair.bind\n if bind_base is None:\n end_of_strand = True\n break\n if copy_mode:\n pair.add_comp()\n if bind_base.is_type(cls.amino_acid_to_base_type(amino_acid)):\n break\n if end_of_strand:\n logger.debug(\"Reached end of strand\")\n break\n\n logger.debug(cls.pairs_to_string(pairs))\n\n strands += cls.strands_from_pairs(pairs)\n return strands\n\n @classmethod\n def strands_from_pairs(cls, pairs: List[BasePair]) -> List[Strand]:\n strands = []\n bind_bases = []\n comp_bases = []\n\n for pair in pairs:\n if pair.bind is not None:\n bind_bases.append(pair.bind)\n elif len(bind_bases) > 0:\n strands.append(Strand(bind_bases))\n bind_bases = []\n\n if pair.comp is not None:\n comp_bases.append(pair.comp)\n elif len(comp_bases) > 0:\n strands.append(Strand(comp_bases[::-1]))\n comp_bases = []\n\n if len(bind_bases) > 0:\n strands.append(Strand(bind_bases))\n if len(comp_bases) > 0:\n strands.append(Strand(comp_bases[::-1]))\n\n return strands\n\n @classmethod\n def amino_acid_to_base(cls, amino_acid: AminoAcid) -> Base:\n return {\n AminoAcid.INA: Base.A,\n AminoAcid.INC: Base.C,\n AminoAcid.ING: Base.G,\n AminoAcid.INT: Base.T,\n }[amino_acid]\n\n @classmethod\n def amino_acid_to_base_type(cls, amino_acid: AminoAcid) -> BaseType:\n return {\n AminoAcid.RPY: BaseType.PYRIMIDINE,\n AminoAcid.RPU: BaseType.PURINE,\n AminoAcid.LPY: BaseType.PYRIMIDINE,\n AminoAcid.LPU: BaseType.PURINE,\n }[amino_acid]\n\n @classmethod\n def amino_acid_to_direction(cls, amino_acid: AminoAcid) -> int:\n return {\n AminoAcid.RPY: 1,\n AminoAcid.RPU: 1,\n AminoAcid.LPY: -1,\n AminoAcid.LPU: -1,\n AminoAcid.MVR: 1,\n AminoAcid.MVL: -1,\n }[amino_acid]\n\n @classmethod\n def pairs_to_string(cls, pairs: List[BasePair]) -> str:\n res = \"[ \"\n comp_map = {Base.A: \"∀\", Base.C: \"Ↄ\", Base.G: \"⅁\", Base.T: \"⊥\"}\n for pair in pairs:\n if pair.comp is None:\n res += \" \"\n else:\n res += str(comp_map[pair.comp]) + \" \"\n res += \"]\\n[ \"\n for pair in pairs:\n if pair.bind is None:\n res += \" \"\n else:\n res += str(pair.bind) + \" \"\n res += \"]\"\n return res" }, { "identifier": "Strand", "path": "typogenetics/typogenetics.py", "snippet": "class Strand:\n bases: List[Base]\n\n @classmethod\n def from_str(cls, strand_str: str) -> \"Strand\":\n bases = []\n for base_str in strand_str:\n if base_str == \" \":\n continue\n base = Base.from_str(base_str)\n bases.append(base)\n return cls(bases)\n\n def iter_bases(self) -> Iterator[Base]:\n yield from self.bases\n\n def iter_duplets(self) -> Iterator[Duplet]:\n unit = 0\n while True:\n if unit + 1 >= len(self):\n break\n\n yield (self[unit], self[unit + 1])\n\n unit += 2\n\n def __repr__(self) -> str:\n return \"\".join([str(b) for b in self.bases])\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def __getitem__(self, unit: int) -> Base:\n return self.bases[unit]\n\n def __len__(self) -> int:\n return len(self.bases)" }, { "identifier": "Translator", "path": "typogenetics/typogenetics.py", "snippet": "class Translator:\n \"\"\"\n | | A | C | G | T |\n | --- | --- | --- 
| --- | --- |\n | A | | cut | del | swi |\n | C | mvr | mvl | cop | off |\n | G | ina | inc | ing | int |\n | T | rpy | rpu | lpy | lpu |\n \"\"\"\n\n @classmethod\n def translate(cls, strand: Strand) -> List[Enzyme]:\n enzymes = []\n amino_acids: List[AminoAcid] = []\n for duplet in strand.iter_duplets():\n amino_acid = cls._translate_duplet(duplet)\n if amino_acid is None and len(amino_acids) > 0:\n enzyme = Enzyme(amino_acids)\n enzymes.append(enzyme)\n amino_acids = []\n elif amino_acid is not None:\n amino_acids.append(amino_acid)\n\n if len(amino_acids) > 0:\n enzyme = Enzyme(amino_acids)\n enzymes.append(enzyme)\n\n return enzymes\n\n @classmethod\n def _translate_duplet(cls, duplet: Duplet) -> Optional[AminoAcid]:\n return {\n (Base.A, Base.A): None,\n (Base.A, Base.C): AminoAcid.CUT,\n (Base.A, Base.G): AminoAcid.DEL,\n (Base.A, Base.T): AminoAcid.SWI,\n (Base.C, Base.A): AminoAcid.MVR,\n (Base.C, Base.C): AminoAcid.MVL,\n (Base.C, Base.G): AminoAcid.COP,\n (Base.C, Base.T): AminoAcid.OFF,\n (Base.G, Base.A): AminoAcid.INA,\n (Base.G, Base.C): AminoAcid.INC,\n (Base.G, Base.G): AminoAcid.ING,\n (Base.G, Base.T): AminoAcid.INT,\n (Base.T, Base.A): AminoAcid.RPY,\n (Base.T, Base.C): AminoAcid.RPU,\n (Base.T, Base.G): AminoAcid.LPY,\n (Base.T, Base.T): AminoAcid.LPU,\n }[duplet]" } ]
import logging import numpy as np from enum import StrEnum, auto from queue import Queue from typing import Optional, Tuple from numpy.random import Generator from typogenetics.typogenetics import Base, Rewriter, Strand, Translator
3,114
logger = logging.getLogger(__name__) class EditType(StrEnum): MUTATE = auto() INSERT = auto() DELETE = auto() class Editor: PROB_MUTATE = 0.80 PROB_INSERT = 0.10 PROB_DELETE = 0.10 @classmethod
logger = logging.getLogger(__name__) class EditType(StrEnum): MUTATE = auto() INSERT = auto() DELETE = auto() class Editor: PROB_MUTATE = 0.80 PROB_INSERT = 0.10 PROB_DELETE = 0.10 @classmethod
def edit(cls, strand: Strand, rng: Generator) -> Strand:
2
2023-12-28 08:59:06+00:00
4k
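An aside on this record: the Translator snippet above boils down to reading the strand two bases at a time and mapping each duplet through a fixed table, with the AA duplet acting as punctuation that ends the current enzyme. Below is a minimal self-contained sketch of just that step, using plain strings instead of the repo's Strand and Enzyme types; the table is copied from the docstring, and the helper name translate_strand is hypothetical.

DUPLET_TABLE = {
    "AA": None,  "AC": "cut", "AG": "del", "AT": "swi",
    "CA": "mvr", "CC": "mvl", "CG": "cop", "CT": "off",
    "GA": "ina", "GC": "inc", "GG": "ing", "GT": "int",
    "TA": "rpy", "TC": "rpu", "TG": "lpy", "TT": "lpu",
}

def translate_strand(strand: str) -> list:
    # Walk the strand in non-overlapping duplets; a trailing odd base is ignored.
    enzymes, current = [], []
    for i in range(0, len(strand) - 1, 2):
        amino = DUPLET_TABLE[strand[i:i + 2]]
        if amino is None:  # "AA" separates enzymes
            if current:
                enzymes.append(current)
            current = []
        else:
            current.append(amino)
    if current:
        enzymes.append(current)
    return enzymes

print(translate_strand("CGGATACTAAACCGA"))
# -> [['cop', 'ina', 'rpy', 'off'], ['cut', 'cop']]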
chaoren2357/gsplatstudio
gsplatstudio/models/structOptim/splitAcloneAprune.py
[ { "identifier": "build_rotation", "path": "gsplatstudio/utils/general_utils.py", "snippet": "def build_rotation(r):\n norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])\n\n q = r / norm[:, None]\n\n R = torch.zeros((q.size(0), 3, 3), device='cuda')\n\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n\n R[:, 0, 0] = 1 - 2 * (y*y + z*z)\n R[:, 0, 1] = 2 * (x*y - r*z)\n R[:, 0, 2] = 2 * (x*z + r*y)\n R[:, 1, 0] = 2 * (x*y + r*z)\n R[:, 1, 1] = 1 - 2 * (x*x + z*z)\n R[:, 1, 2] = 2 * (y*z - r*x)\n R[:, 2, 0] = 2 * (x*z - r*y)\n R[:, 2, 1] = 2 * (y*z + r*x)\n R[:, 2, 2] = 1 - 2 * (x*x + y*y)\n return R" }, { "identifier": "inverse_sigmoid", "path": "gsplatstudio/utils/general_utils.py", "snippet": "def inverse_sigmoid(x):\n return torch.log(x/(1-x))" }, { "identifier": "parse_structured", "path": "gsplatstudio/utils/config.py", "snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg" } ]
import torch import gsplatstudio from gsplatstudio.utils.general_utils import build_rotation, inverse_sigmoid from gsplatstudio.utils.type_utils import * from gsplatstudio.utils.config import parse_structured
1,950
densify_from_iter: int = 500 densify_until_iter: int = 15000 densify_grad_threshold: float = 0.0002 densification_interval: int = 100 size_threshold: int = 20 min_opacity: float = 0.005 num_split: int = 2 @gsplatstudio.register("split.clone.prune-structOptim") class splitAcloneAprune: def __init__(self, cfg): self.cfg = parse_structured(splitAcloneApruneConfig, cfg) @property def state(self): return ( self.max_radii2D, self.xyz_gradient_accum, self.denom ) def restore(self, state, spatial_lr_scale): (self.max_radii2D, self.xyz_gradient_accum, self.denom) = state self.spatial_lr_scale = spatial_lr_scale def init_optim(self,model, spatial_lr_scale): self.spatial_lr_scale = spatial_lr_scale self.reset_stats(model) def update(self, iteration, model, paramOptim, render_pkg, is_white_background): viewspace_point_tensor, visibility_filter, radii = render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"] if iteration < self.cfg.densify_until_iter: # Keep track of max radii in image-space for pruning self.max_radii2D[visibility_filter] = torch.max(self.max_radii2D[visibility_filter], radii[visibility_filter]) self.xyz_gradient_accum[visibility_filter] += torch.norm(viewspace_point_tensor.grad[visibility_filter,:2], dim=-1, keepdim=True) self.denom[visibility_filter] += 1 if iteration > self.cfg.densify_from_iter and iteration % self.cfg.densification_interval == 0: self.densify_and_prune(iteration, model, paramOptim) if iteration % self.cfg.opacity_reset_interval == 0 or (is_white_background and iteration == self.cfg.densify_from_iter): self.reset_model_opacity(model, paramOptim) def should_start_limit_size(self,iteration): return iteration > self.cfg.opacity_reset_interval def densify_and_prune(self, iteration, model, paramOptim): grads = self.xyz_gradient_accum / self.denom grads[grads.isnan()] = 0.0 self.densify_and_clone(model, paramOptim, grads) self.densify_and_split(model, paramOptim, grads) prune_mask = (model.opacity < self.cfg.min_opacity).squeeze() if self.should_start_limit_size(iteration): big_points_vs = self.max_radii2D > self.cfg.size_threshold big_points_ws = model.scaling.max(dim=1).values > 0.1 * self.spatial_lr_scale prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws) self.prune_points(prune_mask, model, paramOptim) torch.cuda.empty_cache() def densify_and_clone(self, model, paramOptim, grads): # Extract points that satisfy the gradient condition selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= self.cfg.densify_grad_threshold, True, False) selected_pts_mask = torch.logical_and(selected_pts_mask, torch.max(model.scaling, dim=1).values <= self.cfg.percent_dense*self.spatial_lr_scale) new_tensors_dict = { "xyz": model._xyz[selected_pts_mask], "f_dc": model._features_dc[selected_pts_mask], "f_rest": model._features_rest[selected_pts_mask], "opacity": model._opacity[selected_pts_mask], "scaling" : model._scaling[selected_pts_mask], "rotation" : model._rotation[selected_pts_mask] } self.densification_postfix(model, paramOptim, new_tensors_dict) def densify_and_split(self, model, paramOptim, grads): # Extract points that satisfy the gradient condition padded_grad = torch.zeros((model.xyz.shape[0]), device="cuda") padded_grad[:grads.shape[0]] = grads.squeeze() selected_pts_mask = torch.where(padded_grad >= self.cfg.densify_grad_threshold, True, False) selected_pts_mask = torch.logical_and(selected_pts_mask, torch.max(model.scaling, dim=1).values > self.cfg.percent_dense*self.spatial_lr_scale) stds = 
model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1) means = torch.zeros((stds.size(0), 3),device="cuda") samples = torch.normal(mean=means, std=stds) rots = build_rotation(model._rotation[selected_pts_mask]).repeat(self.cfg.num_split,1,1) new_tensors_dict = { "xyz": torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + model.xyz[selected_pts_mask].repeat(self.cfg.num_split, 1), "f_dc": model._features_dc[selected_pts_mask].repeat(self.cfg.num_split,1,1), "f_rest": model._features_rest[selected_pts_mask].repeat(self.cfg.num_split,1,1), "opacity": model._opacity[selected_pts_mask].repeat(self.cfg.num_split,1), "scaling" : model.scaling_inverse_activation(model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1) / (0.8*self.cfg.num_split)), "rotation" : model._rotation[selected_pts_mask].repeat(self.cfg.num_split,1) } self.densification_postfix(model, paramOptim, new_tensors_dict) prune_filter = torch.cat((selected_pts_mask, torch.zeros(self.cfg.num_split * selected_pts_mask.sum(), device="cuda", dtype=bool))) self.prune_points(prune_filter, model, paramOptim) def prune_points(self, mask, model, paramOptim): valid_points_mask = ~mask optimizable_tensors = paramOptim.prune_optim(valid_points_mask) model.update_params(optimizable_tensors) self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask] self.denom = self.denom[valid_points_mask] self.max_radii2D = self.max_radii2D[valid_points_mask] def densification_postfix(self, model, paramOptim, new_tensors_dict): optimizable_tensors = paramOptim.cat_tensors(new_tensors_dict) model.update_params(optimizable_tensors) self.reset_stats(model) def reset_model_opacity(self, model, paramOptim):
@dataclass class splitAcloneApruneConfig: max_sh_drgree: int = 3 percent_dense: float = 0.01 opacity_reset_interval: int = 3000 densify_from_iter: int = 500 densify_until_iter: int = 15000 densify_grad_threshold: float = 0.0002 densification_interval: int = 100 size_threshold: int = 20 min_opacity: float = 0.005 num_split: int = 2 @gsplatstudio.register("split.clone.prune-structOptim") class splitAcloneAprune: def __init__(self, cfg): self.cfg = parse_structured(splitAcloneApruneConfig, cfg) @property def state(self): return ( self.max_radii2D, self.xyz_gradient_accum, self.denom ) def restore(self, state, spatial_lr_scale): (self.max_radii2D, self.xyz_gradient_accum, self.denom) = state self.spatial_lr_scale = spatial_lr_scale def init_optim(self,model, spatial_lr_scale): self.spatial_lr_scale = spatial_lr_scale self.reset_stats(model) def update(self, iteration, model, paramOptim, render_pkg, is_white_background): viewspace_point_tensor, visibility_filter, radii = render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"] if iteration < self.cfg.densify_until_iter: # Keep track of max radii in image-space for pruning self.max_radii2D[visibility_filter] = torch.max(self.max_radii2D[visibility_filter], radii[visibility_filter]) self.xyz_gradient_accum[visibility_filter] += torch.norm(viewspace_point_tensor.grad[visibility_filter,:2], dim=-1, keepdim=True) self.denom[visibility_filter] += 1 if iteration > self.cfg.densify_from_iter and iteration % self.cfg.densification_interval == 0: self.densify_and_prune(iteration, model, paramOptim) if iteration % self.cfg.opacity_reset_interval == 0 or (is_white_background and iteration == self.cfg.densify_from_iter): self.reset_model_opacity(model, paramOptim) def should_start_limit_size(self,iteration): return iteration > self.cfg.opacity_reset_interval def densify_and_prune(self, iteration, model, paramOptim): grads = self.xyz_gradient_accum / self.denom grads[grads.isnan()] = 0.0 self.densify_and_clone(model, paramOptim, grads) self.densify_and_split(model, paramOptim, grads) prune_mask = (model.opacity < self.cfg.min_opacity).squeeze() if self.should_start_limit_size(iteration): big_points_vs = self.max_radii2D > self.cfg.size_threshold big_points_ws = model.scaling.max(dim=1).values > 0.1 * self.spatial_lr_scale prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws) self.prune_points(prune_mask, model, paramOptim) torch.cuda.empty_cache() def densify_and_clone(self, model, paramOptim, grads): # Extract points that satisfy the gradient condition selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= self.cfg.densify_grad_threshold, True, False) selected_pts_mask = torch.logical_and(selected_pts_mask, torch.max(model.scaling, dim=1).values <= self.cfg.percent_dense*self.spatial_lr_scale) new_tensors_dict = { "xyz": model._xyz[selected_pts_mask], "f_dc": model._features_dc[selected_pts_mask], "f_rest": model._features_rest[selected_pts_mask], "opacity": model._opacity[selected_pts_mask], "scaling" : model._scaling[selected_pts_mask], "rotation" : model._rotation[selected_pts_mask] } self.densification_postfix(model, paramOptim, new_tensors_dict) def densify_and_split(self, model, paramOptim, grads): # Extract points that satisfy the gradient condition padded_grad = torch.zeros((model.xyz.shape[0]), device="cuda") padded_grad[:grads.shape[0]] = grads.squeeze() selected_pts_mask = torch.where(padded_grad >= self.cfg.densify_grad_threshold, True, False) selected_pts_mask = 
torch.logical_and(selected_pts_mask, torch.max(model.scaling, dim=1).values > self.cfg.percent_dense*self.spatial_lr_scale) stds = model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1) means = torch.zeros((stds.size(0), 3),device="cuda") samples = torch.normal(mean=means, std=stds) rots = build_rotation(model._rotation[selected_pts_mask]).repeat(self.cfg.num_split,1,1) new_tensors_dict = { "xyz": torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + model.xyz[selected_pts_mask].repeat(self.cfg.num_split, 1), "f_dc": model._features_dc[selected_pts_mask].repeat(self.cfg.num_split,1,1), "f_rest": model._features_rest[selected_pts_mask].repeat(self.cfg.num_split,1,1), "opacity": model._opacity[selected_pts_mask].repeat(self.cfg.num_split,1), "scaling" : model.scaling_inverse_activation(model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1) / (0.8*self.cfg.num_split)), "rotation" : model._rotation[selected_pts_mask].repeat(self.cfg.num_split,1) } self.densification_postfix(model, paramOptim, new_tensors_dict) prune_filter = torch.cat((selected_pts_mask, torch.zeros(self.cfg.num_split * selected_pts_mask.sum(), device="cuda", dtype=bool))) self.prune_points(prune_filter, model, paramOptim) def prune_points(self, mask, model, paramOptim): valid_points_mask = ~mask optimizable_tensors = paramOptim.prune_optim(valid_points_mask) model.update_params(optimizable_tensors) self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask] self.denom = self.denom[valid_points_mask] self.max_radii2D = self.max_radii2D[valid_points_mask] def densification_postfix(self, model, paramOptim, new_tensors_dict): optimizable_tensors = paramOptim.cat_tensors(new_tensors_dict) model.update_params(optimizable_tensors) self.reset_stats(model) def reset_model_opacity(self, model, paramOptim):
opacities_new = inverse_sigmoid(torch.min(model.opacity, torch.ones_like(model.opacity)*0.01))
1
2023-12-22 08:27:26+00:00
4k
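A closer look at one step in this record: densify_and_split draws Gaussian offsets per selected point and rotates them into world space via build_rotation, a (w, x, y, z) quaternion-to-matrix conversion. Here is a CPU-only sketch of that conversion for illustration; the name build_rotation_cpu is hypothetical, and the repo version performs the same normalization but allocates on CUDA.

import torch

def build_rotation_cpu(r: torch.Tensor) -> torch.Tensor:
    # Normalize (N, 4) quaternions in (w, x, y, z) order, then assemble
    # the standard 3x3 rotation matrices, row by row.
    q = r / r.norm(dim=-1, keepdim=True)
    w, x, y, z = q.unbind(-1)
    return torch.stack([
        1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y),
        2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x),
        2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y),
    ], dim=-1).reshape(-1, 3, 3)

# The identity quaternion maps to the identity matrix.
print(build_rotation_cpu(torch.tensor([[1.0, 0.0, 0.0, 0.0]])))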
onestepai/api_rag
src/api_rag/api_rag_model.py
[ { "identifier": "ServiceApiConfig", "path": "src/config/ServiceApiConfig.py", "snippet": "class ServiceApiConfig(ServiceApiConfigBase):\n def __init__(self):\n ServiceApiConfigBase.__init__(self,\n url_prefix=DockerConfig.URL_PREFIX + DockerConfig.API_VERSION,\n\n version=DockerConfig.API_VERSION,\n title=DockerConfig.API_TITLE,\n description=DockerConfig.API_DESCRIPTION,\n gpt_api_key= DockerConfig.GPT_API_KEY,\n gpt_4_model= DockerConfig.GPT_API_VERSION_4,\n gpt_3_5_model=DockerConfig.GPT_API_VERSION_35,\n prompt_language=DockerConfig.PROMPT_LANGUAGE\n )\n self.__set_predict_request()\n self.__set_predict_response()\n\n def __set_predict_request(self):\n request = ServiceApiConfigBase.api.model('PredictRequest.extractResult', {\n 'utterance': fields.String(description='content'),\n 'model_name': fields.String(description='model name'),\n 'language': fields.String(description='language')\n })\n predict_request = ServiceApiConfigBase.api.model('PredictRequest', {\n 'requestId': fields.String(description='request id'),\n 'request': fields.Nested(request, description='request'),\n 'timestamp': fields.Integer(description='calling timestamp')\n })\n ServiceApiConfigBase.predict_request = predict_request\n\n def __set_predict_response(self):\n response_result = ServiceApiConfigBase.api.model('PredictResponse.responseResult', {\n 'result': fields.String(description='result'),\n 'content': fields.String(description='content')\n })\n predict_response = ServiceApiConfigBase.api.model('PredictResponse', {\n 'requestId': fields.String(description='request id'),\n 'responseResult': fields.Nested(response_result, description='responseResult'),\n 'timestamp': fields.Integer(description='calling timestamp')\n })\n ServiceApiConfigBase.predict_response = predict_response" }, { "identifier": "apis_info", "path": "src/utils/apis.py", "snippet": "class apis_info():\n def __init__(self):\n\n swagger_reader = SwaggerReader()\n api_dic = swagger_reader.read()\n print(api_dic)\n if 'zh_cn' == ServiceApiConfig.prompt_language:\n prompt_means = ',含义是:'\n prompt_type = ',类型是:'\n prompt_object = '对象包含:'\n prompt_parameter = '参数包括:'\n prompt_return = '返回值参数包括:'\n elif 'en_us' == ServiceApiConfig.prompt_language:\n prompt_means = ',it means:'\n prompt_type = ',the parameter\\'s type is:'\n prompt_object = 'the object includes:'\n prompt_parameter = 'the parameters includes:'\n prompt_return = 'the return includes:'\n\n self.api_definitions = {}\n self.api_descriptions = ''\n\n for key, value in api_dic.items():\n api_description = key\n\n api_description += prompt_means\n api_description += value['description']\n api_description += ';'\n self.api_definitions[key] = {}\n\n api_parameter_description = ''\n for parameter, para_value in value['parameters'].items():\n api_parameter_description += parameter\n api_parameter_description += prompt_type\n api_parameter_description += para_value['type']\n api_parameter_description += prompt_means\n api_parameter_description += para_value['description']\n if 'input_definition' in para_value:\n api_parameter_description += prompt_object\n for object_param, object_value in para_value['input_definition'].items():\n api_parameter_description += object_param\n api_parameter_description += prompt_means\n api_parameter_description += object_value['description']\n api_parameter_description += prompt_type\n api_parameter_description += object_value['type']\n api_parameter_description += ';'\n else:\n api_parameter_description += ';'\n if len(api_parameter_description) > 0:\n 
api_description += prompt_parameter\n api_description += api_parameter_description\n else:\n api_description += ';'\n api_response_description = ''\n for response_para_name, response_para_value in value['output_definition'].items():\n api_response_description += response_para_name\n if 'description' in response_para_value:\n api_response_description += prompt_means\n api_response_description += response_para_value['description']\n api_response_description += prompt_type\n api_response_description += response_para_value['type']\n api_response_description += ';'\n if len(api_response_description) > 0:\n api_description += prompt_return\n api_description += api_response_description\n api_description += '。'\n self.api_descriptions += api_description\n self.api_definitions[key]['input'] = value['parameters']\n self.api_definitions[key]['input_explain'] = api_parameter_description\n self.api_definitions[key]['url'] = api_dic[key]['url']\n self.api_definitions[key]['calling_type'] = api_dic[key]['calling_type']\n self.api_definitions[key]['prefix'] = api_dic[key]['prefix']\n\n self.api_definitions[key]['output'] = api_dic[key]['output_definition']\n self.api_definitions[key]['output_explain'] = api_response_description" }, { "identifier": "GPTChatBot", "path": "src/api_rag/gpt_api.py", "snippet": "class GPTChatBot():\n def __init__(self):\n if 'zh_cn' == ServiceApiConfig.prompt_language:\n self.final_response_prompt = \"请帮我直接回复下面的提问:{},你需要从以下我们内部信息解析,\" \\\n \"帮我回答这个提问并组织答案返回:{},表达和数据表现形式要求考虑最方便h5移动端用户观看(比如markdown富文本形式表达),并详尽清晰。\"\n self.api_select_prompt = \"以下是我们的API列表:{},以下是客户的提问是:{}。请判断能否根据用户的提问内容调用我们的API回答用户提问。如果不能用我们提供的API完成需求,则只回答“unsupported”。\" \\\n \"如果可以用我们提供的API完成需求,则只返回API的名称及参数,只用json表示。\" \\\n \"返回API的json以apis的列表列出api的名字和所需要的调用参数\"\n elif 'en_us' == ServiceApiConfig.prompt_language:\n self.final_response_prompt = \"Please help me directly reply to the following question: {}, you need to analyze the information below I provide\" \\\n \", help me answer this question and organize the answer to return: {}, the expression and data presentation\" \\\n \"form should be considered the most convenient for h5 mobile users to view (such as Markdown rich text format),\" \\\n \" detailed and clear.\"\n self.api_select_prompt = \"The following is my API list: {}, the following is the customer's question: {}. Please determine whether you can call my API with user's question content to answer\" \\\n \" user questions. If the requirements cannot be fulfilled using the APIs I provide, just answer \\\"unsupported\\\". If the requirements can be fulfilled \" \\\n \" using the API I provide, only the name and parameters of the API will be returned, expressed only in json. 
\" \\\n \"The json returned by the API lists which anme is 'apis', and the element of API lists contains the name of the api and the required call parameters \"\n # try:\n # self.memory = [] if r.get(user_id) is None else json.loads(r.get(user_id))\n # except:\n # self.memory =[]\n\n def call_llm_model(self, model_name, content):\n model = GptModel(model_name)\n response = model.search(content)\n return response\n\n\n # api结果生成类\n def final_generate(self, prompt, output_data, model_name):\n if output_data is None or len(output_data) == 0:\n if 'en_us' == ServiceApiConfig.prompt_language:\n prompt = [{\"role\": \"user\",\n \"content\": \"Please help to answer my question:{}\".format(\n prompt)}]\n else:\n prompt = [{\"role\": \"user\",\n \"content\": \"请帮助回答一下问题:{}\".format(\n prompt)}]\n else:\n prompt = [{\"role\": \"user\", \"content\": self.final_response_prompt.format(prompt, output_data)}]\n print('final_messages', prompt)\n answer = self.call_llm_model(model_name, prompt)\n # answer = response['choices'][0]['message']['content']\n # self.messages.append({\"role\":\"assistant\",\"content\":answer})\n return answer\n\n def api_select(self, prompt, apis_info, model_name):\n prompt = [{\"role\": \"user\", \"content\": self.api_select_prompt.format(\n apis_info.api_descriptions, prompt)}]\n answer = self.call_llm_model(model_name, prompt)\n\n return answer" } ]
import json import requests import logging from src.config.ServiceApiConfig import ServiceApiConfig from src.utils.apis import apis_info from src.api_rag.gpt_api import GPTChatBot
2,513
class APIRAGModel(object): def __init__(self): self.apis_info = apis_info() def call_apis(self, answer, headers): results = '' for api in answer['apis']: result = self.call_api(api, headers) results += result return results def call_api(self, api, headers): url = self.apis_info.api_definitions[api['name']]['url'] prefix = self.apis_info.api_definitions[api['name']]['prefix'] logging.info(str(api) + "-------->" + "url" + "------>" + str(url)) input_data = '' params = {} input_params = {} if 'params' in api: input_params = api['params'] elif 'parameters' in api: input_params = api['parameters'] for key, value in input_params.items(): if key in self.apis_info.api_definitions[api['name']]['input']: if self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'header': headers[key] = input_params[key].encode(encoding='utf-8') elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'body': input_data = json.dumps(input_params[key]) elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'query': params[key] = input_params[key].encode(encoding='utf-8') logging.info(str(api) + "------>" + "request_data----->" + str(input_data)) output_data = requests.request(method=self.apis_info.api_definitions[api['name']]['calling_type'].upper(), url=url + prefix, headers=headers, params=params, data=input_data.encode(encoding='utf-8')).text output_explain = self.apis_info.api_definitions[api['name']]['output_explain'] logging.info(str(api) + "------>" + "output_data----->" + str(output_data))
class APIRAGModel(object): def __init__(self): self.apis_info = apis_info() def call_apis(self, answer, headers): results = '' for api in answer['apis']: result = self.call_api(api, headers) results += result return results def call_api(self, api, headers): url = self.apis_info.api_definitions[api['name']]['url'] prefix = self.apis_info.api_definitions[api['name']]['prefix'] logging.info(str(api) + "-------->" + "url" + "------>" + str(url)) input_data = '' params = {} input_params = {} if 'params' in api: input_params = api['params'] elif 'parameters' in api: input_params = api['parameters'] for key, value in input_params.items(): if key in self.apis_info.api_definitions[api['name']]['input']: if self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'header': headers[key] = input_params[key].encode(encoding='utf-8') elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'body': input_data = json.dumps(input_params[key]) elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'query': params[key] = input_params[key].encode(encoding='utf-8') logging.info(str(api) + "------>" + "request_data----->" + str(input_data)) output_data = requests.request(method=self.apis_info.api_definitions[api['name']]['calling_type'].upper(), url=url + prefix, headers=headers, params=params, data=input_data.encode(encoding='utf-8')).text output_explain = self.apis_info.api_definitions[api['name']]['output_explain'] logging.info(str(api) + "------>" + "output_data----->" + str(output_data))
if ServiceApiConfig.prompt_language == 'en_us':
0
2023-12-28 03:13:03+00:00
4k
DerwenAI/textgraphs
textgraphs/gor.py
[ { "identifier": "Edge", "path": "textgraphs/elem.py", "snippet": "class Edge:\n \"\"\"\nA data class representing an edge between two nodes.\n \"\"\"\n src_node: int\n dst_node: int\n kind: RelEnum\n rel: str\n prob: float\n count: int = 1" }, { "identifier": "Node", "path": "textgraphs/elem.py", "snippet": "class Node: # pylint: disable=R0902\n \"\"\"\nA data class representing one node, i.e., an extracted phrase.\n \"\"\"\n node_id: int\n key: str\n span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ]\n text: str\n pos: str\n kind: NodeEnum\n loc: typing.List[ typing.List[ int ] ] = field(default_factory = lambda: [])\n label: typing.Optional[ str ] = None\n length: int = 1\n sub_obj: bool = False\n count: int = 0\n neighbors: int = 0\n weight: float = 0.0\n entity: typing.List[ LinkedEntity ] = field(default_factory = lambda: [])\n annotated: bool = False\n\n\n def get_linked_label (\n self\n ) -> typing.Optional[ str ]:\n \"\"\"\nWhen this node has a linked entity, return that IRI.\nOtherwise return its `label` value.\n\n returns:\na label for the linked entity\n \"\"\"\n if len(self.entity) > 0:\n return self.entity[0].iri\n\n return self.label\n\n\n def get_name (\n self\n ) -> str:\n \"\"\"\nReturn a brief name for the graphical depiction of this Node.\n\n returns:\nbrief label to be used in a graph\n \"\"\"\n if self.kind == NodeEnum.IRI:\n return self.label # type: ignore\n if self.kind == NodeEnum.LEM:\n return self.key\n\n return self.text\n\n\n def get_stacked_count (\n self\n ) -> int:\n \"\"\"\nReturn a modified count, to redact verbs and linked entities from\nthe stack-rank partitions.\n\n returns:\ncount, used for re-ranking extracted entities\n \"\"\"\n if self.pos == \"VERB\" or self.kind == NodeEnum.IRI:\n return 0\n\n return self.count\n\n\n def get_pos (\n self\n ) -> typing.Tuple[ int, int ]:\n \"\"\"\nGenerate a position span for `OpenNRE`.\n\n returns:\na position span needed for `OpenNRE` relation extraction\n \"\"\"\n position: typing.Tuple[ int, int ] = ( self.span.idx, self.span.idx + len(self.text) - 1, )\n return position" }, { "identifier": "NodeEnum", "path": "textgraphs/elem.py", "snippet": "class NodeEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of node categories\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n LEM = 1 # lemmatized token\n ENT = 2 # named entity\n CHU = 3 # noun chunk\n IRI = 4 # IRI for linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"lem\",\n \"ent\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]" }, { "identifier": "RelEnum", "path": "textgraphs/elem.py", "snippet": "class RelEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of edge relations\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n CHU = 1 # `spaCy` noun chunk\n INF = 2 # `REBEL` or `OpenNRE` inferred relation\n SYN = 3 # `sense2vec` inferred synonym\n IRI = 4 # `DBPedia` or `Wikidata` linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"inf\",\n \"syn\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]" }, { "identifier": "SimpleGraph", "path": "textgraphs/graph.py", "snippet": "class SimpleGraph:\n \"\"\"\nAn in-memory graph used to build a `MultiDiGraph` in NetworkX.\n \"\"\"\n\n 
def __init__ (\n self\n ) -> None:\n \"\"\"\nConstructor.\n \"\"\"\n self.nodes: typing.Dict[ str, Node ] = OrderedDict()\n self.edges: typing.Dict[ str, Edge ] = {}\n self.lemma_graph: nx.MultiDiGraph = nx.MultiDiGraph()\n\n\n def reset (\n self\n ) -> None:\n \"\"\"\nRe-initialize the data structures, resetting all but the configuration.\n \"\"\"\n self.nodes = OrderedDict()\n self.edges = {}\n self.lemma_graph = nx.MultiDiGraph()\n\n\n def make_node ( # pylint: disable=R0913,R0914\n self,\n tokens: typing.List[ Node ],\n key: str,\n span: spacy.tokens.token.Token,\n kind: NodeEnum,\n text_id: int,\n para_id: int,\n sent_id: int,\n *,\n label: typing.Optional[ str ] = None,\n length: int = 1,\n linked: bool = True,\n ) -> Node:\n \"\"\"\nLookup and return a `Node` object.\nBy default, link matching keys into the same node.\nOtherwise instantiate a new node if it does not exist already.\n\n tokens:\nlist of parsed tokens\n\n key:\nlemma key (invariant)\n\n span:\ntoken span for the parsed entity\n\n kind:\nthe kind of this `Node` object\n\n text_id:\ntext (top-level document) identifier\n\n para_id:\nparagraph identifier\n\n sent_id:\nsentence identifier\n\n label:\nnode label (for a new object)\n\n length:\nlength of token span\n\n linked:\nflag for whether this links to an entity\n\n returns:\nthe constructed `Node` object\n \"\"\"\n token_id: int = 0\n token_text: str = key\n token_pos: str = \"PROPN\"\n\n if span is not None:\n token_id = span.i\n token_text = span.text\n token_pos = span.pos_\n\n location: typing.List[ int ] = [ # type: ignore\n text_id,\n para_id,\n sent_id,\n token_id,\n ]\n\n if not linked:\n # construct a placeholder node (stopwords)\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n span.text,\n span.pos_,\n kind,\n loc = [ location ],\n length = length,\n )\n\n elif key in self.nodes:\n # link to previously constructed entity node\n self.nodes[key].loc.append(location)\n self.nodes[key].count += 1\n\n # construct a new node for entity or lemma\n else:\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n token_text,\n token_pos,\n kind,\n loc = [ location ],\n label = label,\n length = length,\n count = 1,\n )\n\n node: Node = self.nodes.get(key) # type: ignore\n\n if kind not in [ NodeEnum.CHU, NodeEnum.IRI ]:\n tokens.append(node)\n\n return node # type: ignore\n\n\n def make_edge ( # pylint: disable=R0913\n self,\n src_node: Node,\n dst_node: Node,\n kind: RelEnum,\n rel: str,\n prob: float,\n *,\n debug: bool = False,\n ) -> typing.Optional[ Edge ]:\n \"\"\"\nLookup an edge, creating a new one if it does not exist already,\nand increment the count if it does.\n\n src_node:\nsource node in the triple\n\n dst_node:\ndestination node in the triple\n\n kind:\nthe kind of this `Edge` object\n\n rel:\nrelation label\n\n prob:\nprobability of this `Edge` within the graph\n\n debug:\ndebugging flag\n\n returns:\nthe constructed `Edge` object; this may be `None` if the input parameters indicate skipping the edge\n \"\"\"\n key: str = \".\".join([\n str(src_node.node_id),\n str(dst_node.node_id),\n rel.replace(\" \", \"_\"),\n str(kind.value),\n ])\n\n if debug:\n ic(key)\n\n if key in self.edges:\n self.edges[key].count += 1\n\n elif src_node.node_id != dst_node.node_id:\n # preclude cycles in the graph\n self.edges[key] = Edge(\n src_node.node_id,\n dst_node.node_id,\n kind,\n rel,\n prob,\n )\n\n if debug:\n ic(self.edges.get(key))\n\n return self.edges.get(key)\n\n\n def construct_lemma_graph (\n self,\n *,\n debug: bool = False,\n ) -> 
None:\n \"\"\"\nConstruct the base level of the _lemma graph_ from the collected\nelements. This gets represented in `NetworkX` as a directed graph\nwith parallel edges.\n\n debug:\ndebugging flag\n \"\"\"\n # add the nodes\n self.lemma_graph.add_nodes_from([\n node.node_id\n for node in self.nodes.values()\n ])\n\n # populate the minimum required node properties\n for node_key, node in self.nodes.items():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"title\"] = node_key\n nx_node[\"size\"] = node.count\n nx_node[\"value\"] = node.weight\n\n if debug:\n ic(nx_node)\n\n # add the edges and their properties\n self.lemma_graph.add_edges_from([\n (\n edge.src_node,\n edge.dst_node,\n {\n \"kind\": str(edge.kind),\n \"title\": edge.rel,\n \"weight\": float(edge.count),\n \"prob\": edge.prob,\n \"count\": edge.count,\n },\n )\n for edge_key, edge in self.edges.items()\n ])\n\n\n def dump_lemma_graph (\n self,\n ) -> str:\n \"\"\"\nDump the _lemma graph_ as a JSON string in _node-link_ format,\nsuitable for serialization and subsequent use in JavaScript,\nNeo4j, Graphistry, etc.\n\nMake sure to call beforehand: `TextGraphs.calc_phrase_ranks()`\n\n returns:\na JSON representation of the exported _lemma graph_\n \"\"\"\n # populate the optional node properties\n for node in self.nodes.values():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"name\"] = node.text\n nx_node[\"kind\"] = str(node.kind)\n nx_node[\"iri\"] = node.label\n nx_node[\"subobj\"] = node.sub_obj\n nx_node[\"pos\"] = node.pos\n nx_node[\"loc\"] = str(node.loc)\n\n return json.dumps(\n nx.node_link_data(self.lemma_graph),\n sort_keys = True,\n indent = 2,\n separators = ( \",\", \":\" ),\n )" } ]
from collections import Counter, defaultdict from dataclasses import dataclass, field from icecream import ic # pylint: disable=E0401 from .elem import Edge, Node, NodeEnum, RelEnum from .graph import SimpleGraph import enum import itertools import pathlib import json import sys import typing import networkx as nx # pylint: disable=E0401 import pandas as pd # pylint: disable=E0401 import pyvis # pylint: disable=E0401
3,215
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This class handles topological transforms of graph data into a _graph of relations_ dual representation. see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md """ ###################################################################### ## class definitions class RelDir (enum.IntEnum): """ Enumeration for the directions of a relation. """ HEAD = 0 # relation flows into node TAIL = 1 # relation flows out of node def __str__ ( self ) -> str: """ Codec for representing as a string. """ decoder: typing.List[ str ] = [ "head", "tail", ] return decoder[self.value] @dataclass(order=False, frozen=False) class SheafSeed: """ A data class representing a node from the source graph plus its partial edge, based on a _Sheaf Theory_ decomposition of a graph. """ node_id: int rel_id: int rel_dir: RelDir
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This class handles topological transforms of graph data into a _graph of relations_ dual representation. see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md """ ###################################################################### ## class definitions class RelDir (enum.IntEnum): """ Enumeration for the directions of a relation. """ HEAD = 0 # relation flows into node TAIL = 1 # relation flows out of node def __str__ ( self ) -> str: """ Codec for representing as a string. """ decoder: typing.List[ str ] = [ "head", "tail", ] return decoder[self.value] @dataclass(order=False, frozen=False) class SheafSeed: """ A data class representing a node from the source graph plus its partial edge, based on a _Sheaf Theory_ decomposition of a graph. """ node_id: int rel_id: int rel_dir: RelDir
edge: Edge
0
2023-12-25 11:42:53+00:00
4k
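For context on dump_lemma_graph in the SimpleGraph snippet above: nx.node_link_data is the stock NetworkX export that flattens a graph into a JSON-friendly dict with "nodes" and "links" arrays. Below is a toy example of that format, with generic attribute values rather than the repo's actual schema.

import json
import networkx as nx

# Two nodes and one typed edge, attribute names in the style used above.
g = nx.MultiDiGraph()
g.add_node(0, title="alpha", size=2)
g.add_node(1, title="beta", size=1)
g.add_edge(0, 1, kind="inf", title="related to", prob=1.0)

# Yields {"directed": true, "multigraph": true, "nodes": [...], "links": [...]}
print(json.dumps(nx.node_link_data(g), indent=2, sort_keys=True))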
lc4337/avfcomp
tests/test.py
[ { "identifier": "AVFComp", "path": "avfcomp/comp.py", "snippet": "class AVFComp(AVFParser):\n \"\"\"Compression of an AVF file.\"\"\"\n\n @staticmethod\n def zigzag_enc(n: int) -> int:\n \"\"\"Zigzag transformation encode.\"\"\"\n return (n << 1) ^ (n >> 31)\n\n @staticmethod\n def varint_compression(data: List[int]) -> bytes:\n \"\"\"\n Variable-length integer compression.\n\n Details:\n 0 0000000: 1-byte storage,\n 1 0000000 00000000: 2-byte storage.\n \"\"\"\n\n res = b\"\"\n for cur in data:\n if cur < 0x80: # 1-byte storage\n res += cur.to_bytes(1, byteorder=\"big\")\n\n elif cur < 0x8000:\n res += (cur | 0x8000).to_bytes(2, byteorder=\"big\") # 2-byte storage, high bits\n\n else:\n raise ValueError(\"Integer too large.\")\n\n return res\n\n def __init__(self, handler: Callable[..., T_CompFile] = CompHandler.LZMA):\n super().__init__()\n self.handler = handler\n\n def compress(self, data: bytes) -> bytes:\n \"\"\"Compression in bytes.\"\"\"\n data_io = BytesIO(data)\n self.read_data(data_io)\n\n comp_data = BytesIO()\n if self.handler is not CompHandler.PLAIN:\n with self.handler(comp_data, \"wb\") as fout:\n self.write_data(fout)\n else:\n self.write_data(comp_data)\n return comp_data.getvalue()\n\n def process_out(self, filename: str):\n \"\"\"write the output to a CVF file.\"\"\"\n with self.handler(filename, \"wb\") as fout:\n self.write_data(fout)\n\n def write_events(self, fout):\n fout.write(b\"\\x00\\x01\")\n\n op: List[int] = []\n timestamps: List[int] = []\n xpos: List[int] = []\n ypos: List[int] = []\n # num_events = len(self.events)\n for event in self.events:\n op.append(event[\"type\"])\n timestamps.append(event[\"gametime\"] // 10)\n xpos.append(event[\"xpos\"])\n ypos.append(event[\"ypos\"])\n\n def get_diff(arr: List[int]) -> List[int]:\n diff_arr = [arr[0]]\n for i in range(len(arr) - 1):\n diff_arr.append(arr[i + 1] - arr[i])\n return diff_arr\n\n timestamps = get_diff(timestamps)\n xpos = get_diff(xpos)\n ypos = get_diff(ypos)\n\n xpos = list(map(self.zigzag_enc, xpos))\n ypos = list(map(self.zigzag_enc, ypos))\n\n num_events = len(op)\n timestamps_r = []\n xpos_r = []\n ypos_r = []\n for i in range(num_events):\n key = (op[i], timestamps[i], xpos[i], ypos[i])\n enc = self.VEC_ENC_TABLE.get(key)\n if enc is not None:\n op[i] = enc\n\n else:\n op[i] = self.OP_ENC_TABLE[op[i]]\n timestamps_r.append(timestamps[i])\n xpos_r.append(xpos[i])\n ypos_r.append(ypos[i])\n\n data_r = timestamps_r + xpos_r + ypos_r\n data = bytes(op) + b\"\\xff\" + self.varint_compression(data_r)\n fout.write(len(data).to_bytes(3, byteorder=\"big\"))\n fout.write(data)\n\n def write_mines(self, fout):\n size = (self.rows * self.cols + 7) // 8\n data = bytearray(size)\n for mine in self.mines:\n idx = (mine[0] - 1) * self.cols + (mine[1] - 1)\n byte_idx = idx // 8\n bit_idx = idx % 8\n data[byte_idx] |= 1 << (7 - bit_idx)\n fout.write(data)\n\n def write_footer(self, fout):\n footer_simp = b\"\\r\".join(self.footer)\n fout.write(footer_simp)" }, { "identifier": "AVFDecomp", "path": "avfcomp/decomp.py", "snippet": "class AVFDecomp(AVFParser):\n \"\"\"Decompression of an AVF file.\"\"\"\n\n @staticmethod\n def zigzag_dec(n: int) -> int:\n \"\"\"Zigzag transformation decode.\"\"\"\n return (n >> 1) ^ -(n & 1)\n\n @staticmethod\n def varint_decompression(data: bytes) -> List[int]:\n \"\"\"Variable-length integer decompression.\"\"\"\n\n res = []\n cur = 0\n len_data = len(data)\n while cur < len_data:\n if (data[cur] >> 7) == 0: # stands for 1-byte storage\n res.append(data[cur])\n cur += 1\n\n 
elif cur + 1 < len_data: # stands for 2-byte storage\n res.append(((data[cur] & 0x7F) << 8) | data[cur + 1])\n cur += 2\n\n else:\n raise ValueError(\"Data corrupted or wrong format.\")\n\n return res\n\n def __init__(self, handler: Callable[..., T_CompFile] = CompHandler.LZMA):\n super().__init__()\n self.handler = handler\n\n def decompress(self, data: bytes) -> bytes:\n \"\"\"Decompression in bytes.\"\"\"\n data_io = BytesIO(data)\n if self.handler is not CompHandler.PLAIN:\n with self.handler(data_io, \"rb\") as fin:\n self.read_data(fin)\n else:\n self.read_data(data_io)\n\n decomp_data = BytesIO()\n self.write_data(decomp_data)\n return decomp_data.getvalue()\n\n def process_in(self, filename: str):\n \"\"\"Process the CVF file and parse the data to memory.\"\"\"\n with self.handler(filename, \"rb\") as fin:\n self.read_data(fin)\n\n def read_events(self, fin):\n # Read op codes\n data_len = int.from_bytes(fin.read(3), byteorder=\"big\")\n data = fin.read(data_len)\n\n num_events = data.index(b\"\\xff\")\n op = list(data[:num_events])\n\n left_data = self.varint_decompression(data[num_events + 1 :])\n left_events = len(left_data) // 3\n left_event_cur = 0\n\n timestamps = []\n xpos = []\n ypos = []\n\n # Read timestamps, xpos, ypos\n for i in range(num_events):\n if op[i] in self.OP_DEC_TABLE:\n op[i] = self.OP_DEC_TABLE[op[i]]\n timestamps.append(left_data[left_event_cur])\n xpos.append(left_data[left_events + left_event_cur])\n ypos.append(left_data[2 * left_events + left_event_cur])\n left_event_cur += 1\n else:\n op[i], ti, xi, yi = self.VEC_DEC_TABLE[op[i]]\n timestamps.append(ti)\n xpos.append(xi)\n ypos.append(yi)\n\n xpos = list(map(self.zigzag_dec, xpos))\n ypos = list(map(self.zigzag_dec, ypos))\n\n def get_presum(arr: List[int]) -> List[int]:\n presum_arr = [arr[0]]\n presum = arr[0]\n for i in range(len(arr) - 1):\n presum += arr[i + 1]\n presum_arr.append(presum)\n return presum_arr\n\n timestamps = get_presum(timestamps)\n xpos = get_presum(xpos)\n ypos = get_presum(ypos)\n\n self.events = []\n for i in range(num_events):\n event = {\n \"type\": op[i],\n \"gametime\": timestamps[i] * 10,\n \"xpos\": xpos[i],\n \"ypos\": ypos[i],\n }\n self.events.append(event)\n\n def read_mines(self, fin):\n cols, rows = self.cols, self.rows\n size = (rows * cols + 7) // 8\n data = fin.read(size)\n self.mines = []\n for i in range(cols):\n for j in range(rows):\n idx = j * cols + i\n byte_idx = idx // 8\n bit_idx = idx % 8\n if (data[byte_idx] >> (7 - bit_idx)) & 1:\n mine = (j + 1, i + 1)\n self.mines.append(mine)\n\n def read_footer(self, fin):\n footer_simp = fin.read()\n self.footer = footer_simp.split(b\"\\r\")" }, { "identifier": "CompHandler", "path": "avfcomp/handler.py", "snippet": "class CompHandler:\n \"\"\"Compression handlers.\"\"\"\n\n PLAIN: Callable[..., BufferedIOBase] = open\n GZIP: Callable[..., gzip.GzipFile] = gzip.open\n BZIP2: Callable[..., bz2.BZ2File] = bz2.open\n LZMA: Callable[..., lzma.LZMAFile] = lzma.open" }, { "identifier": "T_CompFile", "path": "avfcomp/handler.py", "snippet": "class CompHandler:\n PLAIN: Callable[..., BufferedIOBase] = open\n GZIP: Callable[..., gzip.GzipFile] = gzip.open\n BZIP2: Callable[..., bz2.BZ2File] = bz2.open\n LZMA: Callable[..., lzma.LZMAFile] = lzma.open" } ]
import hashlib import shutil import time import unittest from os import listdir, mkdir, path from typing import Any, Callable, Iterator, Tuple from avfcomp import AVFComp, AVFDecomp, CompHandler from avfcomp.handler import T_CompFile
2,659
"""Test compression and decompression.""" work_dir = path.dirname(path.dirname(__file__)) data_path = path.join(work_dir, "data") beg_path = path.join(data_path, "avf_beg") int_path = path.join(data_path, "avf_int") exp_path = path.join(data_path, "avf_exp") cvf_path = path.join(data_path, "cvf") decomp_path = path.join(data_path, "avf_decomp") # refresh def refresh(): """Refresh the data directory.""" shutil.rmtree(cvf_path, ignore_errors=True) shutil.rmtree(decomp_path, ignore_errors=True) mkdir(cvf_path) mkdir(decomp_path) def list_files(paths: str) -> Iterator[Tuple[str, str]]: """List all files in a directory.""" for file in listdir(paths): yield file, path.join(paths, file) def calc_file_hash(file_path): """Calculate the hash of a file.""" with open(file_path, "rb") as fin: return hashlib.sha256(fin.read()).hexdigest() def cost_time(func: Callable) -> Callable[..., Tuple[Any, float]]: """Calculate the time cost of a function.""" def fun(*args, **kwargs) -> Tuple[Any, float]: t = time.perf_counter() result = func(*args, **kwargs) return (result, time.perf_counter() - t) return fun @cost_time
"""Test compression and decompression.""" work_dir = path.dirname(path.dirname(__file__)) data_path = path.join(work_dir, "data") beg_path = path.join(data_path, "avf_beg") int_path = path.join(data_path, "avf_int") exp_path = path.join(data_path, "avf_exp") cvf_path = path.join(data_path, "cvf") decomp_path = path.join(data_path, "avf_decomp") # refresh def refresh(): """Refresh the data directory.""" shutil.rmtree(cvf_path, ignore_errors=True) shutil.rmtree(decomp_path, ignore_errors=True) mkdir(cvf_path) mkdir(decomp_path) def list_files(paths: str) -> Iterator[Tuple[str, str]]: """List all files in a directory.""" for file in listdir(paths): yield file, path.join(paths, file) def calc_file_hash(file_path): """Calculate the hash of a file.""" with open(file_path, "rb") as fin: return hashlib.sha256(fin.read()).hexdigest() def cost_time(func: Callable) -> Callable[..., Tuple[Any, float]]: """Calculate the time cost of a function.""" def fun(*args, **kwargs) -> Tuple[Any, float]: t = time.perf_counter() result = func(*args, **kwargs) return (result, time.perf_counter() - t) return fun @cost_time
def get_comp(paths: str, handler: Callable[..., T_CompFile]) -> Tuple[int, int]:
3
2023-12-22 02:19:59+00:00
4k
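Worth spelling out from this record: AVFComp stores per-event deltas with two small encodings, zigzag (so small negative deltas become small unsigned values) and a 1-or-2-byte varint whose high bit flags the 2-byte form. Here is a standalone round trip mirroring the snippet's definitions; the varint_enc and varint_dec names are hypothetical.

def zigzag_enc(n: int) -> int:
    # ..., -2 -> 3, -1 -> 1, 0 -> 0, 1 -> 2, 2 -> 4, ...
    return (n << 1) ^ (n >> 31)

def zigzag_dec(n: int) -> int:
    return (n >> 1) ^ -(n & 1)

def varint_enc(values) -> bytes:
    # Values under 0x80 take one byte; larger ones take two, high bit set.
    out = b""
    for v in values:
        out += v.to_bytes(1, "big") if v < 0x80 else (v | 0x8000).to_bytes(2, "big")
    return out

def varint_dec(data: bytes) -> list:
    res, cur = [], 0
    while cur < len(data):
        if data[cur] >> 7 == 0:
            res.append(data[cur])
            cur += 1
        else:
            res.append(((data[cur] & 0x7F) << 8) | data[cur + 1])
            cur += 2
    return res

deltas = [0, -1, 5, -300, 1000]
encoded = varint_enc([zigzag_enc(d) for d in deltas])
assert [zigzag_dec(v) for v in varint_dec(encoded)] == deltas
print(len(encoded))  # 7 bytes for these five signed deltas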
Noubissie237/StockManagment
StockManagment/App/views.py
[ { "identifier": "panier_cookie", "path": "StockManagment/App/utils.py", "snippet": "def panier_cookie(request):\n articles = []\n\n commande = {\n 'get_panier_total':0,\n 'get_panier_article':0,\n 'produit_physique': True,\n }\n\n nombre_article = commande['get_panier_article']\n\n try:\n panier = json.loads(request.COOKIES.get('panier'))\n for obj in panier:\n\n nombre_article += panier[obj]['qte']\n\n produit = Produit.objects.get(id=obj)\n\n total = produit.price * panier[obj]['qte']\n\n commande['get_panier_article'] += panier[obj]['qte']\n\n commande['get_panier_total'] += total\n\n article = {\n 'produit': {\n 'pk': produit.id,\n 'name': produit.name,\n 'price': produit.price,\n 'nombre': produit.nombre\n },\n 'quantite': panier[obj]['qte'],\n 'get_total': total\n\n }\n\n articles.append(article)\n\n if produit.digital == False:\n commande['produit_physique'] = True\n \n except:\n pass\n\n context = {\n 'articles' : articles, \n 'commande': commande,\n 'nombre_article': nombre_article\n }\n\n return context" }, { "identifier": "data_cookie", "path": "StockManagment/App/utils.py", "snippet": "def data_cookie(request):\n\n if request.user.is_authenticated:\n\n client = request.user.client\n\n commande, created = Commande.objects.get_or_create(client=client, complete=False)\n\n articles = commande.commandearticle_set.all()\n\n nombre_article = commande.get_panier_article\n\n else:\n\n cookie_panier = panier_cookie(request)\n articles = cookie_panier['articles']\n commande = cookie_panier['commande']\n nombre_article = cookie_panier['nombre_article']\n\n context = {\n 'articles' : articles, \n 'commande': commande,\n 'nombre_article': nombre_article\n }\n\n return context" }, { "identifier": "getDataFromApi", "path": "StockManagment/App/utils.py", "snippet": "def getDataFromApi(request):\n try:\n url = \"http://localhost:8000/api/prescriptions/\"\n\n response = requests.get(url)\n \n dataToSave = response.json()\n\n for elt in dataToSave:\n\n if not User.objects.filter(username=elt['email']).exists():\n\n user = User.objects.create_user(username=elt['email'], email=elt['email'], password=elt['Token'])\n\n user.save()\n\n \n if Prescription.objects.filter(email=elt['email']).exists():\n pass\n else:\n tmp = Prescription(nom=elt['nom'], prenom=elt['prenom'], age=elt['age'], sexe=elt['sexe'], email=elt['email'],\n antecedent=elt['antecedent'], prescription1=elt['prescription1'], prescription2=elt['prescription2'], \n prescription3=elt['prescription3'])\n tmp.save()\n\n try:\n user = User.objects.get(username=elt['email'])\n\n client = Client.objects.create(user=user, name=elt[\"nom\"], email=elt['email'])\n\n print(\"valid\")\n\n except:\n print('invalid')\n\n return \"SUCCESS\"\n \n except:\n return \"FAILED\"" }, { "identifier": "LoginForm", "path": "StockManagment/App/forms.py", "snippet": "class LoginForm(forms.Form):\n username = forms.CharField(label='Nom d\\'utilisateur', widget=forms.TextInput(attrs={'class': 'form-control'}))\n password = forms.CharField(label='Mot de passe', widget=PasswordInputWithClass())" } ]
from django.shortcuts import render, redirect from django.http import JsonResponse, HttpResponse from .models import * from django.contrib.auth.decorators import login_required from datetime import datetime from .utils import panier_cookie, data_cookie, getDataFromApi from .forms import LoginForm from django.contrib.auth import authenticate, login, logout import json, requests
1,952
@login_required(login_url='/login') def update_article(request, *args, **kwargs): data = json.loads(request.body) produit_id = data['produit_id'] action = data['action'] produit = Produit.objects.get(id=produit_id) client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit) if action == "add": commande_article.quantite += 1 if action == "remove": commande_article.quantite -=1 commande_article.save() if commande_article.quantite <= 0: commande_article.delete() return JsonResponse("panier modifié", safe=False) @login_required(login_url='/login') def commandeAnonyme(request, data): name = data['form']['name'] username = data['form']['username'] email = data['form']['email'] phone = data['form']['phone'] cookie_panier = panier_cookie(request) articles = cookie_panier['articles'] client, created = Client.objects.get_or_create( email=email ) client.name = name client.save() commande = Commande.objects.create( client=client ) for article in articles: produit = Produit.objects.get(id=article['produit']['pk']) CommandeArticle.objects.create( produit=produit, commande=commande, quantite=article['quantite'] ) return client, commande @login_required(login_url='/login') def traitement_commande(request, *args, **kwargs): data = json.loads(request.body) transaction_id = datetime.now().timestamp() if request.user.is_authenticated: client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) else: client, commande = commandeAnonyme(request, data) total = float(data['form']['total']) commande.transaction_id = data["payment_info"]["transaction_id"] commande.total_trans = data['payment_info']['total'] if commande.get_panier_total == total: commande.complete = True commande.status = data['payment_info']['status'] else: commande.status = "REFUSED" commande.save() return JsonResponse("Attention!!! Traitement Refuse Fraude detecte!", safe=False) commande.save() if commande.produit_physique: AddressChipping.objects.create( client=client, commande=commande, addresse=data['shipping']['address'], ville=data['shipping']['city'], zipcode=data['shipping']['zipcode'] ) return JsonResponse("Traitement complet", safe=False) def login_view(request): if request.method == 'POST': form = LoginForm(request.POST) if form.is_valid(): username = form.cleaned_data['username'] password = form.cleaned_data['password'] user = authenticate(request, username=username, password=password) if user is not None: login(request, user) return render(request, 'shop/index.html', context={'name' : request.user.username}) else: form.add_error(None, "Nom d'utilisateur ou mot de passe incorrect.") else:
@login_required(login_url='/login') def shop(request, *args, **kwargs): """Vue des produits""" produits = Produit.objects.all() data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'produits': produits, 'nombre_article': nombre_article } return render(request, 'shop/index.html', context) @login_required(login_url='/login') def panier(request, *args, **kwargs): data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles' : articles, 'commande': commande, 'nombre_article': nombre_article } return render(request, 'shop/panier.html', context) @login_required(login_url='/login') def commande(request, *args, **kwargs): data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles' : articles, 'commande': commande, 'nombre_article': nombre_article } return render(request, 'shop/commande.html', context) @login_required(login_url='/login') def update_article(request, *args, **kwargs): data = json.loads(request.body) produit_id = data['produit_id'] action = data['action'] produit = Produit.objects.get(id=produit_id) client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit) if action == "add": commande_article.quantite += 1 if action == "remove": commande_article.quantite -=1 commande_article.save() if commande_article.quantite <= 0: commande_article.delete() return JsonResponse("panier modifié", safe=False) @login_required(login_url='/login') def commandeAnonyme(request, data): name = data['form']['name'] username = data['form']['username'] email = data['form']['email'] phone = data['form']['phone'] cookie_panier = panier_cookie(request) articles = cookie_panier['articles'] client, created = Client.objects.get_or_create( email=email ) client.name = name client.save() commande = Commande.objects.create( client=client ) for article in articles: produit = Produit.objects.get(id=article['produit']['pk']) CommandeArticle.objects.create( produit=produit, commande=commande, quantite=article['quantite'] ) return client, commande @login_required(login_url='/login') def traitement_commande(request, *args, **kwargs): data = json.loads(request.body) transaction_id = datetime.now().timestamp() if request.user.is_authenticated: client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) else: client, commande = commandeAnonyme(request, data) total = float(data['form']['total']) commande.transaction_id = data["payment_info"]["transaction_id"] commande.total_trans = data['payment_info']['total'] if commande.get_panier_total == total: commande.complete = True commande.status = data['payment_info']['status'] else: commande.status = "REFUSED" commande.save() return JsonResponse("Attention!!! Traitement Refuse Fraude detecte!", safe=False) commande.save() if commande.produit_physique: AddressChipping.objects.create( client=client, commande=commande, addresse=data['shipping']['address'], ville=data['shipping']['city'], zipcode=data['shipping']['zipcode'] ) return JsonResponse("Traitement complet", safe=False) def login_view(request): if request.method == 'POST': form = LoginForm(request.POST) if form.is_valid(): username = form.cleaned_data['username'] password = form.cleaned_data['password'] user = authenticate(request, username=username, password=password) if user is not None: login(request, user) return render(request, 'shop/index.html', context={'name' : request.user.username}) else: form.add_error(None, "Nom d'utilisateur ou mot de passe incorrect.") else:
res = getDataFromApi(request)
2
2023-12-29 11:13:34+00:00
4k
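The traitement_commande view in the Django record above guards against price tampering by recomputing the cart total server-side and only marking the order complete when it matches the total the client submitted. A minimal framework-free sketch of that check, assuming the same request-body layout; the tolerance-based comparison is a deliberate deviation from the record's exact ==, since the totals are floats:

import json

def verify_cart_total(request_body: bytes, server_total: float) -> bool:
    """Accept the order only when the client-submitted total matches
    the total recomputed from the server-side cart."""
    data = json.loads(request_body)
    submitted = float(data["form"]["total"])
    # Compare with a small tolerance; the record uses exact equality,
    # which is fragile for float arithmetic.
    return abs(submitted - server_total) < 0.01

# A tampered request claiming a cheaper total is rejected.
body = json.dumps({"form": {"total": "1.00"}}).encode()
assert verify_cart_total(body, server_total=49.99) is False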
kokiez/raydium-convert-SOLorTokens
main.py
[ { "identifier": "fetch_pool_keys", "path": "pools.py", "snippet": "def fetch_pool_keys(mint: str):\r\n amm_info = {}\r\n all_pools = {}\r\n try:\r\n # Using this so it will be faster else no option, we go the slower way.\r\n with open('all_pools.json', 'r') as file:\r\n all_pools = json.load(file)\r\n amm_info = extract_pool_info(all_pools, mint)\r\n except:\r\n resp = requests.get('https://api.raydium.io/v2/sdk/liquidity/mainnet.json', stream=True)\r\n pools = resp.json()\r\n official = pools['official']\r\n unofficial = pools['unOfficial'] \r\n all_pools = official + unofficial\r\n\r\n # Store all_pools in a JSON file\r\n with open('all_pools.json', 'w') as file:\r\n json.dump(all_pools, file, default=lambda x: x.__dict__)\r\n amm_info = extract_pool_info(all_pools, mint)\r\n\r\n return {\r\n 'amm_id': Pubkey.from_string(amm_info['id']),\r\n 'authority': Pubkey.from_string(amm_info['authority']),\r\n 'base_mint': Pubkey.from_string(amm_info['baseMint']),\r\n 'base_decimals': amm_info['baseDecimals'],\r\n 'quote_mint': Pubkey.from_string(amm_info['quoteMint']),\r\n 'quote_decimals': amm_info['quoteDecimals'],\r\n 'lp_mint': Pubkey.from_string(amm_info['lpMint']),\r\n 'open_orders': Pubkey.from_string(amm_info['openOrders']),\r\n 'target_orders': Pubkey.from_string(amm_info['targetOrders']),\r\n 'base_vault': Pubkey.from_string(amm_info['baseVault']),\r\n 'quote_vault': Pubkey.from_string(amm_info['quoteVault']),\r\n 'market_id': Pubkey.from_string(amm_info['marketId']),\r\n 'market_base_vault': Pubkey.from_string(amm_info['marketBaseVault']),\r\n 'market_quote_vault': Pubkey.from_string(amm_info['marketQuoteVault']),\r\n 'market_authority': Pubkey.from_string(amm_info['marketAuthority']),\r\n 'bids': Pubkey.from_string(amm_info['marketBids']),\r\n 'asks': Pubkey.from_string(amm_info['marketAsks']),\r\n 'event_queue': Pubkey.from_string(amm_info['marketEventQueue'])\r\n }\r" }, { "identifier": "make_simulate_pool_info_instruction", "path": "pools.py", "snippet": "def make_simulate_pool_info_instruction(accounts):\r\n\r\n keys = [\r\n AccountMeta(pubkey=accounts[\"amm_id\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"authority\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"open_orders\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"base_vault\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"quote_vault\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"lp_mint\"], is_signer=False, is_writable=False),\r\n AccountMeta(pubkey=accounts[\"market_id\"], is_signer=False, is_writable=False), \r\n AccountMeta(pubkey=accounts['event_queue'], is_signer=False, is_writable=False), \r\n \r\n \r\n ]\r\n data = POOL_INFO_LAYOUT.build(\r\n dict(\r\n instruction=12,\r\n simulate_type=0\r\n )\r\n )\r\n return Instruction(AMM_PROGRAM_ID, data, keys)\r" } ]
from solana.rpc.commitment import Commitment from solana.rpc.api import Client from solana.transaction import Transaction from solders.keypair import Keypair from pools import fetch_pool_keys, make_simulate_pool_info_instruction from ast import literal_eval import re
1,722
LIQUIDITY_FEES_NUMERATOR = 25 LIQUIDITY_FEES_DENOMINATOR = 10000 """ Required Variables """ endpoint = "your_rpc_url" payer = Keypair.from_base58_string("your_private_key") token = "ca of your mint/mint address" solana_client = Client(endpoint, commitment=Commitment("confirmed"), blockhash_cache=True) def calculateAmountOut(amount, pool_info): status = pool_info['status'] SWAP_decimals = pool_info['coin_decimals'] #swap coin SOL_decimals = pool_info['pc_decimals'] #SOL COIN_lp_decimals = pool_info['lp_decimals'] #swap coin pool_SOL_amount = pool_info['pool_pc_amount'] #sol pool_SWAP_amount = pool_info['pool_coin_amount'] #coin Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin reserve_in = pool_SOL_amount reserve_out = pool_SWAP_amount current_price = reserve_out / reserve_in # print(f"Current Price in SOL: {current_price:.12f}") amount_in = amount * 10 ** SOL_decimals Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR amount_in_with_fee = amount_in - Fees amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee) # Slippage = 1 + slippage # minimumAmountOut = amountOutRaw / slippage return amountOutRaw / 10 ** SWAP_decimals def calculateAmountIn(amount, pool_info): SWAP_decimals = pool_info['coin_decimals'] #swap coin SOL_decimals = pool_info['pc_decimals'] #SOL COIN_lp_decimals = pool_info['lp_decimals'] #swap coin pool_SOL_amount = pool_info['pool_pc_amount'] #sol pool_SWAP_amount = pool_info['pool_coin_amount'] #coin Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin reserve_in = pool_SWAP_amount reserve_out = pool_SOL_amount current_price = reserve_out / reserve_in # print(f"Current Price in SOL: {current_price:.12f}") amount_in = amount * 10 ** SWAP_decimals Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR amount_in_with_fee = amount_in - Fees amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee) # Slippage = 1 + slippage # minimumAmountOut = amountOutRaw / slippage return amountOutRaw / 10 ** SOL_decimals def PoolInfo(mint): while True: quote = "" pool_keys = fetch_pool_keys(mint) if str(pool_keys['quote_mint']) == "So11111111111111111111111111111111111111112": quote = "SOL" elif str(pool_keys['quote_mint']) == "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v": quote = "USDC" elif str(pool_keys['quote_mint']) == "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB": quote = "USDC" recent_block_hash = solana_client.get_latest_blockhash().value.blockhash tx = Transaction(recent_blockhash=recent_block_hash, fee_payer=payer.pubkey())
LIQUIDITY_FEES_NUMERATOR = 25 LIQUIDITY_FEES_DENOMINATOR = 10000 """ Required Variables """ endpoint = "your_rpc_url" payer = Keypair.from_base58_string("your_private_key") token = "ca of your mint/mint address" solana_client = Client(endpoint, commitment=Commitment("confirmed"), blockhash_cache=True) def calculateAmountOut(amount, pool_info): status = pool_info['status'] SWAP_decimals = pool_info['coin_decimals'] #swap coin SOL_decimals = pool_info['pc_decimals'] #SOL COIN_lp_decimals = pool_info['lp_decimals'] #swap coin pool_SOL_amount = pool_info['pool_pc_amount'] #sol pool_SWAP_amount = pool_info['pool_coin_amount'] #coin Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin reserve_in = pool_SOL_amount reserve_out = pool_SWAP_amount current_price = reserve_out / reserve_in # print(f"Current Price in SOL: {current_price:.12f}") amount_in = amount * 10 ** SOL_decimals Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR amount_in_with_fee = amount_in - Fees amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee) # Slippage = 1 + slippage # minimumAmountOut = amountOutRaw / slippage return amountOutRaw / 10 ** SWAP_decimals def calculateAmountIn(amount, pool_info): SWAP_decimals = pool_info['coin_decimals'] #swap coin SOL_decimals = pool_info['pc_decimals'] #SOL COIN_lp_decimals = pool_info['lp_decimals'] #swap coin pool_SOL_amount = pool_info['pool_pc_amount'] #sol pool_SWAP_amount = pool_info['pool_coin_amount'] #coin Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin reserve_in = pool_SWAP_amount reserve_out = pool_SOL_amount current_price = reserve_out / reserve_in # print(f"Current Price in SOL: {current_price:.12f}") amount_in = amount * 10 ** SWAP_decimals Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR amount_in_with_fee = amount_in - Fees amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee) # Slippage = 1 + slippage # minimumAmountOut = amountOutRaw / slippage return amountOutRaw / 10 ** SOL_decimals def PoolInfo(mint): while True: quote = "" pool_keys = fetch_pool_keys(mint) if str(pool_keys['quote_mint']) == "So11111111111111111111111111111111111111112": quote = "SOL" elif str(pool_keys['quote_mint']) == "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v": quote = "USDC" elif str(pool_keys['quote_mint']) == "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB": quote = "USDC" recent_block_hash = solana_client.get_latest_blockhash().value.blockhash tx = Transaction(recent_blockhash=recent_block_hash, fee_payer=payer.pubkey())
sim_inst = make_simulate_pool_info_instruction(pool_keys)
1
2023-12-29 12:35:38+00:00
4k
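calculateAmountOut and calculateAmountIn in the record above are the same constant-product swap formula applied in opposite directions, with Raydium's 0.25% liquidity fee (25/10000) taken off the input. A dependency-free sketch of that math; the reserve figures in the example are illustrative, not real pool data:

LIQUIDITY_FEES_NUMERATOR = 25
LIQUIDITY_FEES_DENOMINATOR = 10_000

def amount_out(amount_in: float, reserve_in: int, reserve_out: int,
               in_decimals: int, out_decimals: int) -> float:
    """Constant product: out_raw = R_out * x_fee / (R_in + x_fee)."""
    raw_in = amount_in * 10 ** in_decimals
    fee = raw_in * LIQUIDITY_FEES_NUMERATOR / LIQUIDITY_FEES_DENOMINATOR
    raw_in_after_fee = raw_in - fee
    raw_out = reserve_out * raw_in_after_fee / (reserve_in + raw_in_after_fee)
    return raw_out / 10 ** out_decimals

# 1 SOL (9 decimals) into a pool holding 100 SOL and 1,000,000 tokens (6 decimals).
print(amount_out(1, 100 * 10**9, 1_000_000 * 10**6, 9, 6))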
Sen-Yao/RCS-Calculator
main.py
[ { "identifier": "load_E_field", "path": "info_io.py", "snippet": "def load_E_field(path):\n E_field_table = {}\n try:\n with open(path, 'r') as file:\n line = file.readline()\n\n # Get numbers of example frequencies\n while line != '// #Frequencies\\n':\n line = file.readline()\n line = file.readline()\n total_frequencies_sample = int(line)\n\n # Get frequencies value\n while line != '// Radiated/Accepted/Stimulated Power , Frequency \\n':\n line = file.readline()\n # Skip the next three lines\n for _ in range(3):\n next(file)\n line = file.readline()\n frequency = float(line)\n\n # Get total samples information\n while line != '// >> Total #phi samples, total #theta samples\\n':\n line = file.readline()\n line = file.readline()\n total_samples = line.split()\n total_phi_samples = float(total_samples[0])\n total_theta_samples = float(total_samples[1])\n\n # Get valid E-field information\n while line != '// >> Phi, Theta, Re(E_Theta), Im(E_Theta), Re(E_Phi), Im(E_Phi): \\n':\n line = file.readline()\n line = file.readline()\n # Store the E-field data\n while line:\n phi = float(line[:8])\n theta = float(line[9:18])\n E_theta_Re = float(line[19:35])\n E_theta_Im = float(line[36:52])\n E_phi_Re = float(line[53:68])\n E_phi_Im = float(line[69:])\n\n E_theta = complex(E_theta_Re, E_theta_Im)\n E_phi = complex(E_phi_Re, E_phi_Im)\n angle = (phi, theta)\n E_field = [E_theta, E_phi]\n E_field_table[angle] = E_field\n line = file.readline()\n\n sample_num = [total_phi_samples, total_theta_samples, total_frequencies_sample]\n return E_field_table, sample_num, frequency\n except FileNotFoundError:\n print('E-field information not found!')" }, { "identifier": "single_RCS_output", "path": "info_io.py", "snippet": "def single_RCS_output(RCS_table, use_dB=False):\n for angles, RCS_value in RCS_table.items():\n if use_dB:\n print('RCS: σ( θ=', angles[1], ',φ=', angles[0], ')=', rcs_to_dB(RCS_value))\n else:\n print('RCS: σ( θ=', angles[1], ',φ=', angles[0], ')=', RCS_value)" }, { "identifier": "load_RCS", "path": "info_io.py", "snippet": "def load_RCS(path):\n RCS_table = {}\n try:\n with open(path, 'r') as file:\n line = file.readline()\n while line != ('Theta [deg.] Phi [deg.] Abs(RCS )[dB(m^2)] Abs(Theta)[dB(m^2)] Phase(Theta)[deg.] '\n 'Abs(Phi )[dB(m^2)] Phase(Phi )[deg.] 
Ax.Ratio[dB ] \\n'):\n line = file.readline()\n line = file.readline()\n line = file.readline()\n # Store the RCS data\n while line:\n theta = float(line[:8])\n phi = float(line[9:25])\n RCS_value = float(line[26:46])\n\n angle = (phi, theta)\n RCS_table[angle] = RCS_value\n line = file.readline()\n return RCS_table\n\n except FileNotFoundError:\n print('RCS information not found!')" }, { "identifier": "rcs_to_dB", "path": "info_io.py", "snippet": "def rcs_to_dB(rcs_m2, reference_value=1.0):\n return 10 * math.log10(rcs_m2 / reference_value)" }, { "identifier": "RCS_Table", "path": "RCS_Table.py", "snippet": "class RCS_Table:\n def __init__(self, R, Ei):\n self.E_table = {}\n self.RCS_table = {}\n self.CST_table = {}\n self.total_RCS = None\n self.frequency = None\n self.frequency_str = ''\n self.sample_num = []\n\n self.R = R\n self.Ei = Ei\n\n def calculate_single_direction_RCS(self):\n for angles, E_values_list in self.E_table.items( ):\n E_phi, E_theta = self.E_table[angles]\n self.RCS_table[angles] = (4 * math.pi * (self.R ** 2) *\n ((abs(E_phi) ** 2 + abs(E_theta) ** 2) / abs(self.Ei) ** 2))\n\n def calculate_total_RCS(self):\n total_RCS_value = 0\n delta_phi = 360 / (self.sample_num[0] - 1)\n delta_theta = 180 / (self.sample_num[1] - 1)\n\n for angles, RCS_value in self.RCS_table.items( ):\n # When theta = 0 or 180, area approx 0\n if angles[1] == 0 or angles[1] == 180:\n area = self.R ** 2 * math.radians(delta_theta / 2) * math.radians(delta_phi) * math.sin(\n math.radians(delta_theta / 2)) / 2\n else:\n area = self.R ** 2 * ((math.radians(delta_theta) / 2) *\n (math.radians(delta_phi) * math.sin(math.radians(angles[1] - delta_theta / 2))\n + math.radians(delta_phi) * math.sin(\n math.radians(angles[1] + delta_theta / 2))))\n total_RCS_value += ((area * RCS_value) / (4 * math.pi))\n self.total_RCS = total_RCS_value\n\n def check_single_RCS(self):\n for angles, result_RCS in self.RCS_table.items( ):\n if abs(self.CST_table[angles] - rcs_to_dB(result_RCS)) > 0.01:\n warnings.warn('Error! When θ=' + str(angles[1]) + ',φ=' + str(angles[0]) + ')=, result RCS=' +\n str(rcs_to_dB(result_RCS)) + 'dB, but RCS from CST=' + str(self.CST_table[angles]))\n\n def remove_redundancy_table(self):\n angles_to_remove = []\n for angles, E in self.E_table.items( ):\n if angles[0] == 360:\n angles_to_remove.append(angles)\n for angles in angles_to_remove:\n self.E_table.pop(angles)\n\n def renew_frequency_str(self):\n if 1000 < self.frequency < 1000000:\n self.frequency_str = str(int(self.frequency / 1000)) + 'k'\n elif 1000000 < self.frequency < 1000000000:\n self.frequency_str = str(int(self.frequency / 1000000)) + 'M'\n else:\n self.frequency_str = str(int(self.frequency))" } ]
import os import argparse import tqdm import warnings from info_io import load_E_field, single_RCS_output, load_RCS, rcs_to_dB from RCS_Table import RCS_Table
1,897
def main(): """ TODO: Add rad unit :return: """ parser = argparse.ArgumentParser() parser.add_argument('--path', default='data', type=str, help='path to data folder') parser.add_argument('--Ei', '-E', default=1, type=float, help='Mode of incident electric field vector') parser.add_argument('--R', '-R', default=1, type=float, help='Far field range') args = parser.parse_args() print('\n\nParameter:', '\nPath to data folder =', args.path, '\nMode of incident electric field vector =', args.Ei, '\nFar field range =', args.R, '\n\n Start Calculating...') RCS_Table_list = [] for root, directories, files in os.walk(args.path): # Detect E_field txt for filename in tqdm.tqdm(files): if filename[0:2] == 'E_' and filename[-4:] == '.txt':
def main(): """ TODO: Add rad unit :return: """ parser = argparse.ArgumentParser() parser.add_argument('--path', default='data', type=str, help='path to data folder') parser.add_argument('--Ei', '-E', default=1, type=float, help='Mode of incident electric field vector') parser.add_argument('--R', '-R', default=1, type=float, help='Far field range') args = parser.parse_args() print('\n\nParameter:', '\nPath to data folder =', args.path, '\nMode of incident electric field vector =', args.Ei, '\nFar field range =', args.R, '\n\n Start Calculating...') RCS_Table_list = [] for root, directories, files in os.walk(args.path): # Detect E_field txt for filename in tqdm.tqdm(files): if filename[0:2] == 'E_' and filename[-4:] == '.txt':
Table = RCS_Table(args.R, args.Ei)
4
2023-12-24 02:32:49+00:00
4k
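calculate_single_direction_RCS in the RCS_Table snippet above evaluates sigma = 4*pi*R^2*(|E_theta|^2 + |E_phi|^2)/|E_i|^2 for every sampled (phi, theta) direction, and check_single_RCS compares the result against CST's reference in dB(m^2) via rcs_to_dB. A standalone sketch of those two formulas, with illustrative field values:

import math

def rcs(e_theta: complex, e_phi: complex, r: float, e_incident: float) -> float:
    """sigma = 4*pi*R^2 * (|E_theta|^2 + |E_phi|^2) / |E_i|^2, in m^2."""
    return 4 * math.pi * r ** 2 * (abs(e_theta) ** 2 + abs(e_phi) ** 2) / abs(e_incident) ** 2

def rcs_to_dB(rcs_m2: float, reference: float = 1.0) -> float:
    return 10 * math.log10(rcs_m2 / reference)

sigma = rcs(complex(0.3, -0.1), complex(0.05, 0.2), r=1.0, e_incident=1.0)
print(f"{sigma:.4f} m^2 = {rcs_to_dB(sigma):.2f} dB(m^2)")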
karloskar/homeassistant-goecontroller-mqtt
custom_components/goecontroller_mqtt/sensor.py
[ { "identifier": "SENSORS", "path": "custom_components/goecontroller_mqtt/definitions/sensor.py", "snippet": "SENSORS: tuple[GoEControllerSensorEntityDescription, ...] = (\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"0\", \"i\"),\n name=\"Amp 1\",\n state=extract_isv,\n device_class=SensorDeviceClass.CURRENT,\n native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"1\", \"i\"),\n name=\"Amp 2\",\n state=extract_isv,\n device_class=SensorDeviceClass.CURRENT,\n native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"2\", \"i\"),\n name=\"Amp 3\",\n state=extract_isv,\n device_class=SensorDeviceClass.CURRENT,\n native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"3\", \"i\"),\n name=\"Amp 4\",\n state=extract_isv,\n device_class=SensorDeviceClass.CURRENT,\n native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"4\", \"i\"),\n name=\"Amp 5\",\n state=extract_isv,\n device_class=SensorDeviceClass.CURRENT,\n native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"5\", \"i\"),\n name=\"Amp 6\",\n state=extract_isv,\n device_class=SensorDeviceClass.CURRENT,\n native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"0\", \"p\"),\n name=\"Power 1\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"1\", \"p\"),\n name=\"Power 2\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"2\", \"p\"),\n name=\"Power 3\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"3\", \"p\"),\n name=\"Power 4\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"4\", \"p\"),\n name=\"Power 5\",\n state=extract_isv,\n 
device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"5\", \"p\"),\n name=\"Power 6\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"0\", \"f\"),\n name=\"Power Factor 1\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER_FACTOR,\n native_unit_of_measurement=PERCENTAGE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"1\", \"f\"),\n name=\"Power Factor 2\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER_FACTOR,\n native_unit_of_measurement=PERCENTAGE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"2\", \"f\"),\n name=\"Power Factor 3\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER_FACTOR,\n native_unit_of_measurement=PERCENTAGE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"3\", \"f\"),\n name=\"Power Factor 4\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER_FACTOR,\n native_unit_of_measurement=PERCENTAGE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"4\", \"f\"),\n name=\"Power Factor 5\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER_FACTOR,\n native_unit_of_measurement=PERCENTAGE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"5\", \"f\"),\n name=\"Power Factor 6\",\n state=extract_isv,\n device_class=SensorDeviceClass.POWER_FACTOR,\n native_unit_of_measurement=PERCENTAGE,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"ccp\",\n attribute=\"0\",\n name=\"Power (home)\",\n state=extract_ccp,\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"ccp\",\n name=\"Power (grid)\",\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n attribute=\"1\",\n state=extract_ccp,\n ),\n GoEControllerSensorEntityDescription(\n key=\"ccp\",\n name=\"Power (car)\",\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n attribute=\"2\",\n state=extract_ccp,\n ),\n GoEControllerSensorEntityDescription(\n key=\"ccp\",\n name=\"Power (relay)\",\n device_class=SensorDeviceClass.POWER,\n 
native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n attribute=\"3\",\n state=extract_ccp,\n ),\n GoEControllerSensorEntityDescription(\n key=\"ccp\",\n name=\"Power (solar)\",\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n attribute=\"4\",\n state=extract_ccp,\n ),\n GoEControllerSensorEntityDescription(\n key=\"ccp\",\n name=\"Power (battery)\",\n device_class=SensorDeviceClass.POWER,\n native_unit_of_measurement=UnitOfPower.WATT,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=True,\n disabled=False,\n attribute=\"5\",\n state=extract_ccp,\n ),\n GoEControllerSensorEntityDescription(\n key=\"fwv\",\n name=\"Firmware version\",\n entity_category=EntityCategory.DIAGNOSTIC,\n state=remove_quotes,\n device_class=None,\n native_unit_of_measurement=None,\n state_class=None,\n icon=\"mdi:numeric\",\n entity_registry_enabled_default=False,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"rbc\",\n name=\"Reboot counter\",\n entity_category=EntityCategory.DIAGNOSTIC,\n device_class=None,\n native_unit_of_measurement=None,\n state_class=SensorStateClass.TOTAL_INCREASING,\n icon=\"mdi:counter\",\n entity_registry_enabled_default=True,\n disabled=False,\n ),\n GoEControllerSensorEntityDescription(\n key=\"rbt\",\n name=\"Uptime\",\n entity_category=EntityCategory.DIAGNOSTIC,\n device_class=None,\n native_unit_of_measurement=None,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=False,\n disabled=True,\n disabled_reason=\"TODO: Convert to a timestamp first\",\n ),\n GoEControllerSensorEntityDescription(\n key=\"rssi\",\n name=\"WiFi signal strength\",\n entity_category=EntityCategory.DIAGNOSTIC,\n device_class=SensorDeviceClass.SIGNAL_STRENGTH,\n native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,\n state_class=SensorStateClass.MEASUREMENT,\n entity_registry_enabled_default=False,\n disabled=False,\n ),\n)" }, { "identifier": "GoEControllerSensorEntityDescription", "path": "custom_components/goecontroller_mqtt/definitions/sensor.py", "snippet": "class GoEControllerSensorEntityDescription(GoEControllerEntityDescription, SensorEntityDescription):\n \"\"\"Sensor entity description for go-eController.\"\"\"\n\n domain: str = \"sensor\"" }, { "identifier": "GoEControllerEntity", "path": "custom_components/goecontroller_mqtt/entity.py", "snippet": "class GoEControllerEntity(Entity):\n \"\"\"Common go-eController entity.\"\"\"\n\n def __init__(\n self,\n config_entry: config_entries.ConfigEntry,\n description: GoEControllerEntityDescription,\n ) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n topic_prefix = config_entry.data[CONF_TOPIC_PREFIX]\n serial_number = config_entry.data[CONF_SERIAL_NUMBER]\n\n self._topic = f\"{topic_prefix}/{serial_number}/{description.key}\"\n\n slug = slugify(self._topic.replace(\"/\", \"_\"))\n self.entity_id = f\"{description.domain}.{slug}\"\n\n parsed_attribute = description.attribute\n if isinstance(description.attribute, tuple):\n parsed_attribute = \"-\".join(description.attribute)\n\n self._attr_unique_id = \"-\".join(\n [serial_number, description.domain, description.key, parsed_attribute]\n )\n self._attr_device_info = DeviceInfo(\n identifiers={(DOMAIN, serial_number)},\n name=config_entry.title,\n manufacturer=DEVICE_INFO_MANUFACTURER,\n 
model=DEVICE_INFO_MODEL,\n )" } ]
import logging from homeassistant import config_entries, core from homeassistant.components import mqtt from homeassistant.components.sensor import SensorEntity from homeassistant.core import callback from .definitions.sensor import SENSORS, GoEControllerSensorEntityDescription from .entity import GoEControllerEntity
3,119
"""The go-eController (MQTT) sensor.""" _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry, async_add_entities, ): """Config entry setup.""" async_add_entities( GoEControllerSensor(config_entry, description)
"""The go-eController (MQTT) sensor.""" _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry, async_add_entities, ): """Config entry setup.""" async_add_entities( GoEControllerSensor(config_entry, description)
for description in SENSORS
0
2023-12-22 11:32:11+00:00
4k
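The sensor platform above follows Home Assistant's entity-description pattern: a flat tuple of frozen descriptions (SENSORS) drives entity creation, so async_setup_entry reduces to one comprehension over the tuple. A framework-free sketch of that idea; the class and field names below are illustrative stand-ins, not the HA API:

from dataclasses import dataclass

@dataclass(frozen=True)
class SensorDescription:
    key: str
    name: str
    attribute: str

DESCRIPTIONS = (
    SensorDescription(key="ccp", name="Power (home)", attribute="0"),
    SensorDescription(key="ccp", name="Power (grid)", attribute="1"),
)

class Sensor:
    def __init__(self, description: SensorDescription):
        self.description = description
        # Unique id derived from the description, as the record's
        # GoEControllerEntity does with serial number + key + attribute.
        self.unique_id = f"{description.key}-{description.attribute}"

entities = [Sensor(d) for d in DESCRIPTIONS]
print([e.unique_id for e in entities])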
T0kyoB0y/PotatoWidgets
PotatoWidgets/Widget/_Scroll.py
[ { "identifier": "Listener", "path": "PotatoWidgets/Variable/_Listener.py", "snippet": "class Listener(Variable):\n def __init__(self, callback, initial_value=None):\n super().__init__(initial_value)\n self._callback = callback\n self._thread = None\n self._stop_thread = threading.Event()\n self.start_listening()\n\n def stop_listening(self):\n if self._thread and self._thread.is_alive():\n self._stop_thread.set()\n self._thread.join()\n\n def start_listening(self):\n if self._thread and self._thread.is_alive():\n print(f\"{self} is already listening\")\n return\n\n self._stop_thread.clear()\n self._thread = threading.Thread(target=lambda: self._callback(self))\n self._thread.start()\n\n def get_value(self):\n return self._value\n\n def set_value(self, new_value):\n self._value = new_value\n self.emit(\"valuechanged\")\n\n def __str__(self):\n return str(self._value)" }, { "identifier": "Poll", "path": "PotatoWidgets/Variable/_Poll.py", "snippet": "class Poll(Variable):\n def __init__(self, interval, callback, initial_value=None):\n super().__init__(initial_value or callback())\n self._interval = self._parse_interval(interval)\n self._callback = callback\n self._timeout_id = None\n self.start_poll()\n\n def _parse_interval(self, interval):\n try:\n if isinstance(interval, str):\n unit = interval[-1].lower()\n value = int(interval[:-1])\n\n if unit == \"s\":\n return value * 1000\n elif unit == \"m\":\n return value * 60 * 1000\n elif unit == \"h\":\n return value * 60 * 60 * 1000\n elif isinstance(interval, int):\n return interval\n except (ValueError, IndexError):\n return int(interval)\n\n def is_polling(self):\n return bool(self._timeout_id)\n\n def stop_poll(self):\n if self._timeout_id:\n GLib.source_remove(self._timeout_id)\n self._timeout_id = None\n else:\n print(f\"{self} has no poll running\")\n\n def start_poll(self):\n if self.is_polling():\n print(f\"{self} is already polling\")\n return\n\n self._timeout_id = GLib.timeout_add(\n priority=GLib.PRIORITY_DEFAULT_IDLE,\n interval=self._interval,\n function=self._poll_callback,\n )\n\n def _poll_callback(self):\n self.set_value(self._callback())\n return GLib.SOURCE_CONTINUE\n\n def get_value(self):\n return self._value\n\n def set_value(self, new_value):\n self._value = new_value\n self.emit(\"valuechanged\")\n\n def __str__(self):\n return str(self._value)" }, { "identifier": "Variable", "path": "PotatoWidgets/Variable/_Variable.py", "snippet": "class Variable(GObject.Object):\n valuechanged = GObject.Signal()\n\n def __init__(self, initial_value):\n super().__init__()\n self._value = initial_value\n\n def get_value(self):\n return self._value\n\n def set_value(self, new_value):\n self._value = new_value\n self.emit(\"valuechanged\")\n\n def initial_value(self, value):\n self._value = value\n\n def __str__(self):\n return str(self._value)" }, { "identifier": "BasicProps", "path": "PotatoWidgets/Widget/_Common/_BasicProps.py", "snippet": "class BasicProps(Gtk.Widget):\n def __init__(\n self,\n halign,\n valign,\n hexpand,\n vexpand,\n active,\n visible,\n classname,\n # tooltip,\n css,\n size=[10, 10],\n ):\n Gtk.Widget.__init__(self)\n self.set_hexpand(True if hexpand else False)\n self.set_vexpand(True if vexpand else False)\n self.set_halign(halign)\n self.set_valign(valign)\n self.set_visible(visible)\n self.set_sensitive(active) if active is not None else None\n self.set_classname(classname)\n self.__clasif_size(size)\n self.apply_css(css) if css else None\n\n for key, value in locals().items():\n callback = {\n \"halign\": 
self.set_halign,\n \"valign\": self.set_valign,\n \"hexpand\": self.set_hexpand,\n \"vexpand\": self.set_vexpand,\n \"active\": self.set_sensitive,\n \"visible\": self.set_visible,\n \"size\": self.set_size,\n \"classname\": self.set_classname,\n }.get(key)\n\n self.bind(value, callback) if callback else None\n\n def set_size(self, size):\n self.__clasif_size(size)\n\n def set_halign(self, param):\n super().set_halign(self.__clasif_align(str(param)))\n\n def set_valign(self, param):\n super().set_valign(self.__clasif_align(str(param)))\n\n def __clasif_size(self, size):\n if isinstance(size, int):\n self.set_size_request(size, size)\n elif isinstance(size, list):\n if len(size) == 2:\n self.set_size_request(size[0], size[1])\n elif len(size) == 1:\n self.set_size_request(size[0], size[0])\n\n def __clasif_align(self, param):\n dict = {\n \"fill\": Gtk.Align.FILL,\n \"start\": Gtk.Align.START,\n \"end\": Gtk.Align.END,\n \"center\": Gtk.Align.CENTER,\n \"baseline\": Gtk.Align.BASELINE,\n }\n return dict.get(param.lower(), Gtk.Align.FILL)\n\n def set_classname(self, param):\n if isinstance(param, (str)):\n context = self.get_style_context()\n [context.add_class(i) for i in param.split(\" \") if i != \" \"]\n elif isinstance(param, (list)):\n for i in param:\n if isinstance(i, (Listener, Variable, Poll)):\n pass\n\n def apply_css(self, css):\n if css:\n self._selfclass = f\"{self.get_css_name().replace()}_{randint(1111, 9999)}\"\n context = self.get_style_context()\n context.add_class(self._selfclass)\n\n context.add_provider(\n Gtk.CssProvider().load_from_data(\n f\".{self._selfclass} {{css}}\".encode()\n ),\n Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,\n )\n\n def bind(self, var, callback):\n if isinstance(var, (Listener, Variable, Poll)):\n var.connect(\n \"valuechanged\", lambda x: GLib.idle_add(lambda: callback(x.get_value()))\n )" } ]
from ..__Import import * from ..Variable import Listener, Poll, Variable from ._Common._BasicProps import BasicProps
1,886
class Scroll(Gtk.ScrolledWindow, BasicProps): def __init__( self, orientation="h", children=None, attributes=None, css=None, halign="fill", valign="fill", hexpand=False, vexpand=False, visible=True, classname="", ): Gtk.ScrolledWindow.__init__(self) BasicProps.__init__( self, css=css, halign=halign, valign=valign, hexpand=hexpand, vexpand=vexpand, active=None, visible=visible, classname=classname, ) self.__clasif_orientation(orientation) self.set_visible(visible) self.add_with_viewport(children) if children else None attributes(self) if attributes else None for key, value in locals().items(): if key not in [ "self", "halign", "valign", "hexpand", "vexpand", "visible", "active", "visible", "classname",
class Scroll(Gtk.ScrolledWindow, BasicProps): def __init__( self, orientation="h", children=None, attributes=None, css=None, halign="fill", valign="fill", hexpand=False, vexpand=False, visible=True, classname="", ): Gtk.ScrolledWindow.__init__(self) BasicProps.__init__( self, css=css, halign=halign, valign=valign, hexpand=hexpand, vexpand=vexpand, active=None, visible=visible, classname=classname, ) self.__clasif_orientation(orientation) self.set_visible(visible) self.add_with_viewport(children) if children else None attributes(self) if attributes else None for key, value in locals().items(): if key not in [ "self", "halign", "valign", "hexpand", "vexpand", "visible", "active", "visible", "classname",
] and isinstance(value, (Listener, Poll, Variable)):
1
2023-12-30 01:34:01+00:00
4k
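BasicProps.bind in this record is a small observer hookup: any Listener/Poll/Variable passed as a constructor argument gets its "valuechanged" signal connected to the matching setter, marshalled onto the GTK main loop with GLib.idle_add. A GTK-free sketch of the same idea (no GLib here, so subscribers run synchronously):

class Variable:
    def __init__(self, initial):
        self._value = initial
        self._subscribers = []

    def connect(self, callback):
        self._subscribers.append(callback)

    def set_value(self, new_value):
        self._value = new_value
        for callback in self._subscribers:  # emit "valuechanged"
            callback(new_value)

label_text = Variable("idle")
label_text.connect(lambda value: print(f"setter called with {value!r}"))
label_text.set_value("running")  # prints: setter called with 'running'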
0xn0ne/sensitive-helper
sensitive-helper.py
[ { "identifier": "compress", "path": "utils/compress.py", "snippet": "def zip_info(file_path: pathlib.Path) -> Dict[str, Any]:\ndef uncompress_zip(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None, is_error: bool = True\n) -> Union[pathlib.Path, Any]:\ndef is_tar(file_path: pathlib.Path):\ndef uncompress_tar(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_gz(file_path: pathlib.Path):\ndef uncompress_gz(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_7z(file_path: pathlib.Path):\ndef uncompress_7z(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_rar(file_path: pathlib.Path):\ndef uncompress_rar(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_bz(file_path: pathlib.Path):\ndef uncompress(\n file_path: Union[pathlib.Path, str],\n extract_dir: Union[pathlib.Path, str] = None,\n is_error: bool = True,\n is_recursive: bool = False,\n max_level=64,\n) -> Union[pathlib.Path, Any]:" }, { "identifier": "configurator", "path": "utils/configurator.py", "snippet": "_G_CFG = {}\ndef maps_merge(*maps: Dict) -> Dict:\ndef map_merge(dst: Dict, src: Dict):\n def __init__(self, template: Dict = None):\n def get(self, keys: AnyStr, _defult: Any = None, sep: AnyStr = '.'):\n def set(self, keys: AnyStr, value: Any, sep: AnyStr = '.'):\n def loads(self, content: str, fmt: str = 'json', reload: bool = False):\n def dumps(self, fmt: str = 'json'):\n def exists(self, key: AnyStr):\n def gen_pretty(self, objs: Iterable = None, depth: int = 3, filters: List[str] = None):\n def gen_value(value, depth_next, parent_next):\n def recursion_pretty(obj_data, depth_curr, parent: str = ''):\n def __str__(self):\n def __init__(self, filepath: str = 'configs.json', template: Dict = None):\n def load(self, strict: bool = False, quiet: bool = False) -> Union[Exception, Any]:\n def save(self, exist_ok: bool = True):\ndef new(\n name: str = '__DEFAULT__', base_class: Union[Any, FileConfigurator] = FileConfigurator, *args, **kwargs\n) -> FileConfigurator:\nclass BaseConfigurator:\nclass FileConfigurator(BaseConfigurator):" }, { "identifier": "office", "path": "utils/office.py", "snippet": "def docx_handler(file_path: Union[pathlib.Path, str]) -> pathlib.Path:\ndef xlsx_handler(file_path: Union[pathlib.Path, str]):\ndef pptx_handler():" }, { "identifier": "process", "path": "utils/process.py", "snippet": "class ProcessPoolHelper(concurrent.futures.ProcessPoolExecutor):\n def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):\n def submit_super(self, fn, /, *args, **kwargs) -> concurrent.futures.Future:\n def result_yield(self, timeout: float = None) -> Generator[Any, None, None]:\ndef __test_performance_func(min: int = 500, max: int = 600):\ndef __test_return_func(min: int = 500, max: int = 600):\ndef __test_return_dict_func(min: int = 500, max: int = 600):" } ]
import base64 import binascii import csv import json import pathlib import re import time import pandas import tqdm import argparse from typing import Any, AnyStr, Dict, List, Union from utils import compress, configurator, office, process
2,159
if len(result) % 4 != 0: return True, '' try: # 编码错误的全都丢掉,不丢掉也看不懂 ret_extend = base64.b64decode(result).decode('utf-8') if not re.search(r'^[\u0020-\u007F\u2010-\u202f\u3000-\u301f\u4e00-\u9fa5\uff00-\uffef]+$', ret_extend): return True, '' # \u0020-\u007F:英文可视字符集 # \u2010-\u202f:中文部分符号集 # \u3000-\u301f:中文部分符号集 # \u4e00-\u9fa5:中文常见文字集 # \u2e80-\u9fff:中文文字及中文异形文字集 # \uff00-\uffef:中文部分符号集 except UnicodeDecodeError: return True, '' except binascii.Error: return True, '' return False, ret_extend def is_filter_jwt(result: AnyStr): times = 0 res_split = result.split(b'.') while times < 2: if len(res_split[times]) % 4 != 0: return True, '' times += 1 return False, '' def is_filter_result(result: AnyStr, filters: List[AnyStr], flags: int): if not filters: return False, '' for fil in filters: if re.search(fil, result, flags): return True, '' return False, '' # @log_run_times def search_content( file_object: Union[pathlib.Path, bytes], rules: Dict[str, List[str]], split: bytes = b'[\x00-\x1F\x7F]+', is_re_all: bool = False, ) -> List[Dict[str, str]]: ret = [] row_contents = [file_object] if isinstance(file_object, pathlib.Path): row_contents = re.split(split, file_object.read_bytes()) for row_one in row_contents: # 按控制字符进行分割行 if len(row_one) < 12: # 单行内容少于8个字符,丢掉 continue for rule_name in rules: rule = rules[rule_name] flags = 0 filters = None if isinstance(rule, Dict): if 'flags' in rule: flags = string_to_reg_flags(rule['flags']) if 're_filters' in rule: filters = rule['re_filters'] rule = rule['regexp'] for regexp in rule: r_result = re.search(regexp, row_one, flags) if not r_result: continue try: result_byte = r_result.group() result_text = result_byte.decode('utf-8') except UnicodeDecodeError: continue is_filter, extend = is_filter_result(result_byte, filters, flags) if rule_name == 'BASE64': is_filter, extend = is_filter_base64(result_byte) if rule_name == 'JSON WEB TOKEN(JWT)': is_filter, extend = is_filter_jwt(result_byte) if is_filter: continue ret.append( { 'file': file_object.__str__(), 'group': rule_name, 'regexp': regexp.decode('utf-8'), 'match': result_text, 'extend': extend, } ) if not is_re_all: # 如果关闭了匹配所有正则组数据且已发现有用数据,则退出循环 return ret return ret def gen_file_list(src_path: str, exclude_files: List[str]) -> List[pathlib.Path]: tar_path = pathlib.Path(src_path) ret = [] if tar_path.is_file(): ret.append(tar_path) else: for filepath in tar_path.glob('**/*'): is_skip = False if filepath.is_dir(): continue filename = filepath.name for r_exclude in exclude_files: # 文件名正则匹配,在排除名单中则排除文件 if re.match(r_exclude, filename): is_skip = True break if is_skip: continue if filename.endswith('.docx') and not filename.startswith('~$'): office.docx_handler(filepath) elif filename.endswith('.xlsx') and not filename.startswith('~$'): office.xlsx_handler(filepath) else:
#!/bin/python3 # _*_ coding:utf-8 _*_ # # sensitive-helper.py # 本地文件敏感信息搜索工具 def log_run_times(func): def wrapper(*args, **kwargs): s_time = time.time() ret = func(*args, **kwargs) total_time = time.time() - s_time if total_time <= 1: return ret with open('run_times.log', 'a') as _f: _f.write('total time(s): {}, args: {}\n'.format(time.time() - s_time, args[0][:127])) return ret return wrapper def string_to_reg_flags(flags: str): flags_int = 0 for flag in flags.split('|'): flags_int |= getattr(re, flag) return flags_int def is_filter_base64(result: AnyStr): if len(result) % 4 != 0: return True, '' try: # 编码错误的全都丢掉,不丢掉也看不懂 ret_extend = base64.b64decode(result).decode('utf-8') if not re.search(r'^[\u0020-\u007F\u2010-\u202f\u3000-\u301f\u4e00-\u9fa5\uff00-\uffef]+$', ret_extend): return True, '' # \u0020-\u007F:英文可视字符集 # \u2010-\u202f:中文部分符号集 # \u3000-\u301f:中文部分符号集 # \u4e00-\u9fa5:中文常见文字集 # \u2e80-\u9fff:中文文字及中文异形文字集 # \uff00-\uffef:中文部分符号集 except UnicodeDecodeError: return True, '' except binascii.Error: return True, '' return False, ret_extend def is_filter_jwt(result: AnyStr): times = 0 res_split = result.split(b'.') while times < 2: if len(res_split[times]) % 4 != 0: return True, '' times += 1 return False, '' def is_filter_result(result: AnyStr, filters: List[AnyStr], flags: int): if not filters: return False, '' for fil in filters: if re.search(fil, result, flags): return True, '' return False, '' # @log_run_times def search_content( file_object: Union[pathlib.Path, bytes], rules: Dict[str, List[str]], split: bytes = b'[\x00-\x1F\x7F]+', is_re_all: bool = False, ) -> List[Dict[str, str]]: ret = [] row_contents = [file_object] if isinstance(file_object, pathlib.Path): row_contents = re.split(split, file_object.read_bytes()) for row_one in row_contents: # 按控制字符进行分割行 if len(row_one) < 12: # 单行内容少于8个字符,丢掉 continue for rule_name in rules: rule = rules[rule_name] flags = 0 filters = None if isinstance(rule, Dict): if 'flags' in rule: flags = string_to_reg_flags(rule['flags']) if 're_filters' in rule: filters = rule['re_filters'] rule = rule['regexp'] for regexp in rule: r_result = re.search(regexp, row_one, flags) if not r_result: continue try: result_byte = r_result.group() result_text = result_byte.decode('utf-8') except UnicodeDecodeError: continue is_filter, extend = is_filter_result(result_byte, filters, flags) if rule_name == 'BASE64': is_filter, extend = is_filter_base64(result_byte) if rule_name == 'JSON WEB TOKEN(JWT)': is_filter, extend = is_filter_jwt(result_byte) if is_filter: continue ret.append( { 'file': file_object.__str__(), 'group': rule_name, 'regexp': regexp.decode('utf-8'), 'match': result_text, 'extend': extend, } ) if not is_re_all: # 如果关闭了匹配所有正则组数据且已发现有用数据,则退出循环 return ret return ret def gen_file_list(src_path: str, exclude_files: List[str]) -> List[pathlib.Path]: tar_path = pathlib.Path(src_path) ret = [] if tar_path.is_file(): ret.append(tar_path) else: for filepath in tar_path.glob('**/*'): is_skip = False if filepath.is_dir(): continue filename = filepath.name for r_exclude in exclude_files: # 文件名正则匹配,在排除名单中则排除文件 if re.match(r_exclude, filename): is_skip = True break if is_skip: continue if filename.endswith('.docx') and not filename.startswith('~$'): office.docx_handler(filepath) elif filename.endswith('.xlsx') and not filename.startswith('~$'): office.xlsx_handler(filepath) else:
compress.uncompress(filepath, is_error=False, is_recursive=True)
0
2023-12-26 03:30:39+00:00
4k
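is_filter_base64 in the record above throws away regex hits that are not genuine, human-readable Base64: the length must be a multiple of 4, the payload must decode as UTF-8, and the decoded text must consist only of printable ASCII/CJK characters. A compact standalone version of that filter, using the record's own character-class pattern:

import base64
import binascii
import re

PRINTABLE = re.compile(r'^[\u0020-\u007F\u2010-\u202f\u3000-\u301f\u4e00-\u9fa5\uff00-\uffef]+$')

def looks_like_real_base64(candidate: bytes) -> bool:
    if len(candidate) % 4 != 0:
        return False
    try:
        decoded = base64.b64decode(candidate).decode("utf-8")
    except (binascii.Error, UnicodeDecodeError):
        return False
    return bool(PRINTABLE.search(decoded))

print(looks_like_real_base64(base64.b64encode(b"secret token")))  # True
print(looks_like_real_base64(b"not-base64!!"))                    # False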
Zerohertz/Streamlit-Quant
lib/visual.py
[ { "identifier": "_main", "path": "lib/layout.py", "snippet": "def _main():\n layout = _default()\n layout.height = 500 * st.session_state[\"scale\"]\n layout.width = 1000\n layout.xaxis = {\n \"type\": \"category\",\n \"gridcolor\": \"black\",\n \"tickangle\": -45,\n \"tickfont\": {\"color\": \"black\"},\n \"showgrid\": True,\n \"tickmode\": \"auto\",\n \"nticks\": 20,\n \"rangeslider\": {\"visible\": False},\n }\n layout.yaxis = {\n \"gridcolor\": \"black\",\n \"tickprefix\": \"₩\",\n \"tickformat\": \",\",\n \"tickfont\": {\"color\": \"black\"},\n \"showgrid\": True,\n \"autorange\": True,\n }\n if not st.session_state[\"cache\"][\"vis_signals\"]:\n return layout\n layout.yaxis2 = {\n \"overlaying\": \"y\",\n \"side\": \"right\",\n \"tickfont\": {\"color\": \"white\"},\n \"showgrid\": False,\n }\n layout.shapes = st.session_state[\"cache\"][\"transaction_vert\"]\n if st.session_state[\"cache\"][\"method\"] != \"Quant\":\n layout.yaxis3 = {\n \"overlaying\": \"y\",\n \"side\": \"right\",\n \"tickfont\": {\"color\": \"white\"},\n \"showgrid\": False,\n }\n return layout" }, { "identifier": "_transaction", "path": "lib/layout.py", "snippet": "def _transaction():\n layout = _default()\n layout.height = 400 * st.session_state[\"scale\"]\n layout.width = 1000\n return layout" }, { "identifier": "_color", "path": "lib/util.py", "snippet": "def _color(cnt, alpha=0.99, palette=\"husl\"):\n colors = []\n colors_ = zz.plot.color(cnt, uint8=True, palette=palette)\n if cnt == 1:\n colors_ = [colors_]\n for color_ in colors_:\n colors.append(\"rgba(\" + \",\".join(list(map(str, color_))) + f\",{alpha})\")\n return colors" } ]
import plotly.graph_objs as go import streamlit as st import zerohertzLib as zz from plotly.subplots import make_subplots from lib.layout import _main, _transaction from lib.util import _color
2,410
x=st.session_state["cache"]["transaction"]["period"], name="Period", marker_color="#0a0a80", nbinsx=20, ), row=1, col=3, ) fig.update_xaxes( gridcolor="black", tickangle=-45, tickprefix="₩", tickformat=",", tickfont={"color": "black"}, showgrid=True, tickmode="auto", row=1, col=1, ) fig.update_yaxes( gridcolor="black", tickfont={"color": "black"}, showgrid=True, autorange=True, row=1, col=1, ) fig.update_xaxes( gridcolor="black", tickangle=-45, tickfont={"color": "black"}, showgrid=True, tickmode="auto", ticksuffix="%", tickformat=".2f", row=1, col=2, ) fig.update_yaxes( gridcolor="black", tickfont={"color": "black"}, showgrid=True, autorange=True, row=1, col=2, ) fig.update_xaxes( gridcolor="black", tickangle=-45, tickfont={"color": "black"}, showgrid=True, tickmode="auto", ticksuffix="days", row=1, col=3, ) fig.update_yaxes( gridcolor="black", tickfont={"color": "black"}, showgrid=True, autorange=True, row=1, col=3, ) return fig def _vert(xdata, signal, logic, threshold=(-1, 1)): threshold_sell, threshold_buy = threshold if logic == 1: dash = "solid" color = "rgba(255, 0, 0, 0.2)" elif logic == -1: dash = "solid" color = "rgba(0, 0, 255, 0.2)" elif logic == 2: dash = "longdashdot" color = "rgba(255, 0, 0, 0.2)" elif logic == -2: dash = "longdashdot" color = "rgba(0, 0, 255, 0.2)" elif signal >= threshold_buy: dash = "dash" color = "rgba(255, 0, 0, 0.2)" elif signal <= threshold_sell: dash = "dash" color = "rgba(0, 0, 255, 0.2)" else: return None return go.layout.Shape( type="line", x0=xdata, y0=0, x1=xdata, y1=1, xref="x", yref="paper", line={"color": color, "width": 2, "dash": dash}, ) def main(): figs = [st.session_state["cache"]["candle"]] if st.session_state["cache"]["vis_ma"]: figs += st.session_state["cache"]["ma"] if st.session_state["cache"]["vis_bollinger"]: figs += st.session_state["cache"]["bollinger"] if st.session_state["cache"]["vis_signals"]: figs += st.session_state["cache"]["quant"] st.plotly_chart( go.Figure( data=figs, layout=_main(), ), use_container_width=True, ) def transaction(): fig = _backtest()
def candle(): data, xdata = st.session_state["cache"]["data"], st.session_state["cache"]["xdata"] st.session_state["cache"]["candle"] = go.Candlestick( x=xdata, open=data.Open, high=data.High, low=data.Low, close=data.Close, increasing={"line": {"color": "red"}}, decreasing={"line": {"color": "blue"}}, name=st.session_state["cache"]["name"], ) st.session_state["logger"].info( f"""[Plot] Candle Chart: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})""" ) def moving_average(): xdata = st.session_state["cache"]["xdata"] st.session_state["cache"]["ma"] = [] colors = _color(4, 0.5, "Set1") for idx, window in enumerate([5, 20, 60, 120]): st.session_state["cache"]["ma"].append( go.Scatter( x=xdata, y=st.session_state["cache"]["data"] .iloc[:, :4] .mean(1) .rolling(window) .mean(), mode="lines", name=f"MA{window}", line={"color": colors[idx]}, ) ) st.session_state["logger"].info( f"""[Plot] Moving Average: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})""" ) def bollinger_bands(): bands = zz.quant.util._bollinger_bands(st.session_state["cache"]["data"]) xdata = st.session_state["cache"]["xdata"] st.session_state["cache"]["bollinger"] = [] for col_, name_, color_ in zip( ["lower_band", "middle_band", "upper_band"], ["Lower", "Middle", "Upper"], ["rgba(255, 0, 0, 0.5)", "rgba(0, 255, 0, 0.5)", "rgba(0, 0, 255, 0.5)"], ): st.session_state["cache"]["bollinger"].append( go.Scatter( x=xdata, y=bands[col_], mode="lines", name=name_, line={"color": color_}, ) ) st.session_state["logger"].info( f"""[Plot] Bollinger Bands: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})""" ) def _signal(signals): st.session_state["cache"]["quant"] = [] if isinstance(signals, zz.quant.Quant): threshold_sell, threshold_buy = signals.threshold_sell, signals.threshold_buy signals = signals.signals else: threshold_sell, threshold_buy = -1, 1 colors = _color(len(signals.columns)) for idx, col in enumerate(signals.columns[:-2]): signals[col] st.session_state["cache"]["quant"].append( go.Scatter( x=st.session_state["cache"]["xdata"], y=signals[col], yaxis="y3", mode="lines", name=zz.quant.util._method2str(col), line={"color": colors[idx]}, ) ) st.session_state["cache"]["quant"].append( go.Scatter( x=st.session_state["cache"]["xdata"], y=signals.signals, yaxis="y2", mode="lines", name="Signal", line={"color": "rgba(0, 0, 0, 0.5)"}, ) ) st.session_state["cache"]["transaction_vert"] = [] for day, sig, log in zip( st.session_state["cache"]["xdata"], signals.signals, signals.logic, ): vert_ = _vert(day, sig, log, (threshold_sell, threshold_buy)) if vert_ is not None: st.session_state["cache"]["transaction_vert"].append(vert_) def _backtest(): fig = make_subplots(rows=1, cols=3) fig.add_trace( go.Histogram( x=st.session_state["cache"]["transaction"]["buy"], name="Buy", marker_color="red", nbinsx=20, ), row=1, col=1, ) fig.add_trace( go.Histogram( x=st.session_state["cache"]["transaction"]["sell"], name="Sell", marker_color="blue", nbinsx=20, ), row=1, col=1, ) fig.add_trace( go.Histogram( x=st.session_state["cache"]["transaction"]["profit"], name="Profit", marker_color="#0a800a", nbinsx=20, ), row=1, col=2, ) fig.add_trace( go.Histogram( x=st.session_state["cache"]["transaction"]["period"], name="Period", marker_color="#0a0a80", nbinsx=20, ), row=1, col=3, ) fig.update_xaxes( gridcolor="black", tickangle=-45, tickprefix="₩", tickformat=",", tickfont={"color": "black"}, showgrid=True, tickmode="auto", row=1, col=1, ) fig.update_yaxes( gridcolor="black", tickfont={"color": "black"}, showgrid=True, autorange=True, row=1, col=1, ) fig.update_xaxes( gridcolor="black", tickangle=-45, tickfont={"color": "black"}, showgrid=True, tickmode="auto", ticksuffix="%", tickformat=".2f", row=1, col=2, ) fig.update_yaxes( gridcolor="black", tickfont={"color": "black"}, showgrid=True, autorange=True, row=1, col=2, ) fig.update_xaxes( gridcolor="black", tickangle=-45, tickfont={"color": "black"}, showgrid=True, tickmode="auto", ticksuffix="days", row=1, col=3, ) fig.update_yaxes( gridcolor="black", tickfont={"color": "black"}, showgrid=True, autorange=True, row=1, col=3, ) return fig def _vert(xdata, signal, logic, threshold=(-1, 1)): threshold_sell, threshold_buy = threshold if logic == 1: dash = "solid" color = "rgba(255, 0, 0, 0.2)" elif logic == -1: dash = "solid" color = "rgba(0, 0, 255, 0.2)" elif logic == 2: dash = "longdashdot" color = "rgba(255, 0, 0, 0.2)" elif logic == -2: dash = "longdashdot" color = "rgba(0, 0, 255, 0.2)" elif signal >= threshold_buy: dash = "dash" color = "rgba(255, 0, 0, 0.2)" elif signal <= threshold_sell: dash = "dash" color = "rgba(0, 0, 255, 0.2)" else: return None return go.layout.Shape( type="line", x0=xdata, y0=0, x1=xdata, y1=1, xref="x", yref="paper", line={"color": color, "width": 2, "dash": dash}, ) def main(): figs = [st.session_state["cache"]["candle"]] if st.session_state["cache"]["vis_ma"]: figs += st.session_state["cache"]["ma"] if st.session_state["cache"]["vis_bollinger"]: figs += st.session_state["cache"]["bollinger"] if st.session_state["cache"]["vis_signals"]: figs += st.session_state["cache"]["quant"] st.plotly_chart( go.Figure( data=figs, layout=_main(), ), use_container_width=True, ) def transaction(): fig = _backtest()
fig.update_layout(_transaction())
1
2023-12-26 11:29:06+00:00
4k
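bollinger_bands above delegates the band computation to zz.quant.util._bollinger_bands and only handles plotting; the bands themselves are rolling statistics over the price series. A pandas-only sketch of the usual computation; the window of 20 and width of 2 standard deviations are the textbook defaults, assumed here rather than read from zerohertzLib:

import pandas as pd

def bollinger_bands(close: pd.Series, window: int = 20, k: float = 2.0) -> pd.DataFrame:
    middle = close.rolling(window).mean()
    std = close.rolling(window).std()
    return pd.DataFrame({
        "lower_band": middle - k * std,
        "middle_band": middle,
        "upper_band": middle + k * std,
    })

prices = pd.Series(range(100), dtype=float)
print(bollinger_bands(prices).tail(3))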
lvyufeng/uie_mindspore
uie_predictor.py
[ { "identifier": "ErnieMTokenizerFast", "path": "tokenizer.py", "snippet": "class ErnieMTokenizerFast(PreTrainedTokenizerFast):\n r\"\"\"\n Construct a \"fast\" ERNIE-M tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n Args:\n vocab_file (`str`):\n File containing the vocabulary.\n sentencepiece_model_file (`str`):\n [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that\n contains the vocabulary necessary to instantiate a tokenizer.\n do_lower_case (`bool`, *optional*, defaults to `True`):\n Whether or not to lowercase the input when tokenizing.\n unk_token (`str`, *optional*, defaults to `\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n sep_token (`str`, *optional*, defaults to `\"[SEP]\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n pad_token (`str`, *optional*, defaults to `\"[PAD]\"`):\n The token used for padding, for example when batching sequences of different lengths.\n cls_token (`str`, *optional*, defaults to `\"[CLS]\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n mask_token (`str`, *optional*, defaults to `\"[MASK]\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n clean_text (`bool`, *optional*, defaults to `True`):\n Whether or not to clean the text before tokenization by removing any control characters and replacing all\n whitespaces by the classic one.\n tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):\n Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this\n issue](https://github.com/huggingface/transformers/issues/328)).\n strip_accents (`bool`, *optional*):\n Whether or not to strip all accents. 
If this option is not specified, then it will be determined by the\n value for `lowercase` (as in the original ERNIE-M).\n wordpieces_prefix (`str`, *optional*, defaults to `\"##\"`):\n The prefix for subwords.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n slow_tokenizer_class = ErnieMTokenizer\n\n def __init__(\n self,\n vocab_file=None,\n sentencepiece_model_file=None,\n tokenizer_file=None,\n do_lower_case=True,\n unk_token=\"[UNK]\",\n sep_token=\"[SEP]\",\n pad_token=\"[PAD]\",\n cls_token=\"[CLS]\",\n mask_token=\"[MASK]\",\n tokenize_chinese_chars=True,\n strip_accents=None,\n **kwargs\n ):\n super().__init__(\n vocab_file,\n sentencepiece_model_file,\n tokenizer_file=tokenizer_file,\n do_lower_case=do_lower_case,\n unk_token=unk_token,\n sep_token=sep_token,\n pad_token=pad_token,\n cls_token=cls_token,\n mask_token=mask_token,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n **kwargs,\n )\n\n normalizer_state = json.loads(\n self.backend_tokenizer.normalizer.__getstate__())\n if (\n normalizer_state.get(\"lowercase\", do_lower_case) != do_lower_case\n or normalizer_state.get(\"strip_accents\", strip_accents) != strip_accents\n or normalizer_state.get(\"handle_chinese_chars\", tokenize_chinese_chars) != tokenize_chinese_chars\n ):\n normalizer_class = getattr(\n normalizers, normalizer_state.pop(\"type\"))\n normalizer_state[\"lowercase\"] = do_lower_case\n normalizer_state[\"strip_accents\"] = strip_accents\n normalizer_state[\"handle_chinese_chars\"] = tokenize_chinese_chars\n self.backend_tokenizer.normalizer = normalizer_class(\n **normalizer_state)\n\n self.do_lower_case = do_lower_case\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A ERNIE-M sequence has the following format:\n - single sequence: `[CLS] X [SEP]`\n - pair of sequences: `[CLS] A [SEP] B [SEP]`\n Args:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n Returns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n \"\"\"\n output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n\n if token_ids_1:\n output += [self.sep_token_id] + token_ids_1 + [self.sep_token_id]\n\n return output\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A ERNIE-M sequence\n pair mask has the following format:\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n\n if token_ids_1 is None:\n return (len(token_ids_0) + 2) * [0]\n return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n files = self._tokenizer.model.save(\n save_directory, name=filename_prefix)\n return tuple(files)\n\n @property\n def added_tokens_encoder(self) -> Dict[str, int]:\n \"\"\"\n Returns the sorted mapping from string to index. The added tokens encoder is cached for performance\n optimisation in `self._added_tokens_encoder` for the slow tokenizers.\n \"\"\"\n # return {k.content: v for v, k in sorted(self._tokenizer.get_vocab().items(), key=lambda item: item[0])}\n return self._tokenizer.get_vocab()" }, { "identifier": "logger", "path": "utils.py", "snippet": "def set_seed(seed):\ndef get_span(start_ids, end_ids, with_prob=False):\ndef get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):\n def __init__(self):\n def compute(self, start_probs, end_probs, gold_start_ids, gold_end_ids):\n def update(self, num_correct_spans, num_infer_spans, num_label_spans):\n def eval_span(self, predict_start_ids, predict_end_ids, label_start_ids,\n label_end_ids):\n def accumulate(self):\n def reset(self):\n def name(self):\ndef convert_example(example, tokenizer, max_seq_len):\ndef map_offset(ori_offset, offset_mapping):\ndef reader(data_path, max_seq_len=512):\ndef unify_prompt_name(prompt):\n def __init__(self, name: str = None):\n def disable(self):\n def enable(self):\n def is_enable(self) -> bool:\n def __call__(self, log_level: str, msg: str):\n def use_terminator(self, terminator: str):\n def processing(self, msg: str, interval: float = 0.1):\n def _printer():\ndef get_id_and_prob(spans, offset_map):\ndef cut_chinese_sent(para):\ndef dbc2sbc(s):\n def __init__(self, patience=7, verbose=False, delta=0, save_dir='checkpoint/early_stopping', trace_func=print):\n def __call__(self, val_loss, model):\n def save_checkpoint(self, val_loss, model):\ndef get_relation_type_dict(relation_data):\n def compare(a, b):\ndef add_entity_negative_example(examples, texts, prompts, label_set,\n negative_ratio):\ndef add_relation_negative_example(redundants, text, num_positive, ratio):\ndef add_full_negative_example(examples, texts, relation_prompts, predicate_set,\n subject_goldens):\ndef generate_cls_example(text, labels, prompt_prefix, options):\ndef convert_cls_examples(raw_examples,\n prompt_prefix=\"情感倾向\",\n options=[\"正向\", \"负向\"]):\ndef convert_ext_examples(raw_examples,\n negative_ratio,\n prompt_prefix=\"情感倾向\",\n options=[\"正向\", \"负向\"],\n separator=\"##\",\n is_train=True):\n def _sep_cls_label(label, separator):\ndef get_path_from_url(url,\n root_dir,\n check_exist=True,\n decompress=True):\n def is_url(path):\n def _map_path(url, root_dir):\n def _get_download(url, fullname):\n def _download(url, path):\n def _uncompress_file_zip(filepath):\n def _is_a_single_file(file_list):\n def _is_a_single_dir(file_list):\n def _uncompress_file_tar(filepath, 
mode=\"r:*\"):\n def _decompress(fname):\nclass SpanEvaluator:\nclass Logger(object):\nclass EarlyStopping:\nBAR_FORMAT = f'{{desc}}: {Fore.GREEN}{{percentage:3.0f}}%{Fore.RESET} {Fore.BLUE}{{bar}}{Fore.RESET} {Fore.GREEN}{{n_fmt}}/{{total_fmt}} {Fore.RED}{{rate_fmt}}{{postfix}}{Fore.RESET} eta {Fore.CYAN}{{remaining}}{Fore.RESET}'\nBAR_FORMAT_NO_TIME = f'{{desc}}: {Fore.GREEN}{{percentage:3.0f}}%{Fore.RESET} {Fore.BLUE}{{bar}}{Fore.RESET} {Fore.GREEN}{{n_fmt}}/{{total_fmt}}{Fore.RESET}'\nBAR_TYPE = [\n \"░▝▗▖▘▚▞▛▙█\",\n \"░▖▘▝▗▚▞█\",\n \" ▖▘▝▗▚▞█\",\n \"░▒█\",\n \" >=\",\n \" ▏▎▍▌▋▊▉█\"\n \"░▏▎▍▌▋▊▉█\"\n]\n DOWNLOAD_RETRY_LIMIT = 3" } ]
import re import numpy as np import math import argparse import mindspore from mindnlp.transformers import UIE, UIEM from tokenizer import ErnieMTokenizerFast from utils import logger, get_bool_ids_greater_than, get_span, get_id_and_prob, cut_chinese_sent, dbc2sbc from mindnlp.transformers import BertTokenizerFast
3,372
class MindSporeInferBackend: def __init__(self, model_path_prefix, multilingual=False, use_fp16=False): logger.info(">>> [MindSporeInferBackend] Creating Engine ...") if multilingual: self.model = UIEM.from_pretrained(model_path_prefix) else: self.model = UIE.from_pretrained(model_path_prefix) self.model.set_train(False) if use_fp16: logger.info( ">>> [MindSporeInferBackend] Use FP16 to inference ...") self.model = self.model.half() logger.info(">>> [MindSporeInferBackend] Engine Created ...") def infer(self, input_dict): for input_name, input_value in input_dict.items(): input_value = mindspore.Tensor(input_value) input_dict[input_name] = input_value outputs = self.model(**input_dict) start_prob, end_prob = outputs[0], outputs[1] start_prob = start_prob.asnumpy() end_prob = end_prob.asnumpy() return start_prob, end_prob class UIEPredictor(object): def __init__(self, model, schema, task_path=None, schema_lang="zh", engine='mindspore', position_prob=0.5, max_seq_len=512, batch_size=64, split_sentence=False, use_fp16=False): if model in ['uie-m-base', 'uie-m-large']: self._multilingual = True else: self._multilingual = False self._model = model self._engine = engine self._task_path = task_path self._position_prob = position_prob self._max_seq_len = max_seq_len self._batch_size = batch_size self._split_sentence = split_sentence self._use_fp16 = use_fp16 self._schema_tree = None self._is_en = True if model in ['uie-base-en' ] or schema_lang == 'en' else False self.set_schema(schema) self._prepare_predictor() def _prepare_predictor(self): assert self._engine in ['mindspore'], "engine must be mindspore!" if self._task_path is None: self._task_path = self._model if self._multilingual:
self._tokenizer = ErnieMTokenizerFast.from_pretrained(
0
2023-12-25 11:02:24+00:00
4k
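To make this record's span decoding concrete, the following is a minimal, self-contained sketch of how `get_bool_ids_greater_than` and `get_span` are typically combined to turn start/end probabilities into extracted spans — a simplified reimplementation for illustration, not the repo's exact code (threshold and inputs are made up):

def bool_ids_greater_than(probs, limit=0.5):
    # indices whose probability exceeds the threshold (single sequence)
    return [i for i, p in enumerate(probs) if p > limit]

def pair_spans(start_ids, end_ids):
    # simplified pairing: each start takes the nearest end at or after it
    spans = []
    for s in sorted(start_ids):
        for e in sorted(end_ids):
            if e >= s:
                spans.append((s, e))
                break
    return spans

start_prob = [0.1, 0.9, 0.2, 0.1]
end_prob = [0.1, 0.1, 0.8, 0.1]
print(pair_spans(bool_ids_greater_than(start_prob),
                 bool_ids_greater_than(end_prob)))  # -> [(1, 2)]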
Tongjilibo/bert4vector
bert4vector/bert.py
[ { "identifier": "Base", "path": "bert4vector/base.py", "snippet": "class Base:\n \"\"\"\n Interface for similarity compute and search.\n\n In all instances, there is a corpus against which we want to perform the similarity search.\n For each similarity search, the input is a document or a corpus, and the output are the similarities\n to individual corpus documents.\n \"\"\"\n\n def add_corpus(self, corpus: Union[List[str], Dict[str, str]]):\n \"\"\"\n Extend the corpus with new documents.\n\n Parameters\n ----------\n corpus : list of str\n \"\"\"\n raise NotImplementedError(\"cannot instantiate Abstract Base Class\")\n\n def similarity(self, a: Union[str, List[str]], b: Union[str, List[str]]):\n \"\"\"\n Compute similarity between two texts.\n :param a: list of str or str\n :param b: list of str or str\n :param score_function: function to compute similarity, default cos_sim\n :return: similarity score, torch.Tensor, Matrix with res[i][j] = cos_sim(a[i], b[j])\n \"\"\"\n raise NotImplementedError(\"cannot instantiate Abstract Base Class\")\n\n def distance(self, a: Union[str, List[str]], b: Union[str, List[str]]):\n \"\"\"Compute cosine distance between two texts.\"\"\"\n raise NotImplementedError(\"cannot instantiate Abstract Base Class\")\n\n def most_similar(self, queries: Union[str, List[str], Dict[str, str]], topk: int = 10):\n \"\"\"\n Find the topk most similar texts to the query against the corpus.\n :param queries: Dict[str(query_id), str(query_text)] or List[str] or str\n :param topk: int\n :return: Dict[str, Dict[str, float]], {query_id: {corpus_id: similarity_score}, ...}\n \"\"\"\n raise NotImplementedError(\"cannot instantiate Abstract Base Class\")\n\n def search(self, queries: Union[str, List[str], Dict[str, str]], topk: int = 10):\n \"\"\"\n Find the topk most similar texts to the query against the corpus.\n :param queries: Dict[str(query_id), str(query_text)] or List[str] or str\n :param topk: int\n :return: Dict[str, Dict[str, float]], {query_id: {corpus_id: similarity_score}, ...}\n \"\"\"\n return self.most_similar(queries, topk=topk)\n \n def save_embeddings(index_path):\n pass\n\n def load_embeddings(index_path):\n pass" }, { "identifier": "cos_sim", "path": "bert4vector/utils.py", "snippet": "def cos_sim(a: Union[torch.Tensor, np.ndarray], b: Union[torch.Tensor, np.ndarray]):\n \"\"\"\n Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.\n :return: Matrix with res[i][j] = cos_sim(a[i], b[j])\n \"\"\"\n if not isinstance(a, torch.Tensor):\n a = torch.tensor(a)\n\n if not isinstance(b, torch.Tensor):\n b = torch.tensor(b)\n\n if len(a.shape) == 1:\n a = a.unsqueeze(0)\n\n if len(b.shape) == 1:\n b = b.unsqueeze(0)\n\n a_norm = torch.nn.functional.normalize(a, p=2, dim=1)\n b_norm = torch.nn.functional.normalize(b, p=2, dim=1)\n return torch.mm(a_norm, b_norm.transpose(0, 1))" }, { "identifier": "dot_score", "path": "bert4vector/utils.py", "snippet": "def dot_score(a: Union[torch.Tensor, np.ndarray], b: Union[torch.Tensor, np.ndarray]):\n \"\"\"\n Computes the dot-product dot_prod(a[i], b[j]) for all i and j.\n :return: Matrix with res[i][j] = dot_prod(a[i], b[j])\n \"\"\"\n if not isinstance(a, torch.Tensor):\n a = torch.tensor(a)\n\n if not isinstance(b, torch.Tensor):\n b = torch.tensor(b)\n\n if len(a.shape) == 1:\n a = a.unsqueeze(0)\n\n if len(b.shape) == 1:\n b = b.unsqueeze(0)\n\n return torch.mm(a, b.transpose(0, 1))" }, { "identifier": "semantic_search", "path": "bert4vector/utils.py", "snippet": "def semantic_search(\n query_embeddings: 
Union[torch.Tensor, np.ndarray],\n corpus_embeddings: Union[torch.Tensor, np.ndarray],\n query_chunk_size: int = 100,\n corpus_chunk_size: int = 500000,\n top_k: int = 10,\n score_function=cos_sim\n):\n \"\"\"\n This function performs a cosine similarity search between a list of query embeddings and corpus embeddings.\n It can be used for Information Retrieval / Semantic Search for corpora up to about 1 Million entries.\n\n :param query_embeddings: A 2-dimensional tensor with the query embeddings.\n :param corpus_embeddings: A 2-dimensional tensor with the corpus embeddings.\n :param query_chunk_size: Process 100 queries simultaneously. Increasing that value increases the speed, but\n requires more memory.\n :param corpus_chunk_size: Scans the corpus 100k entries at a time. Increasing that value increases the speed,\n but requires more memory.\n :param top_k: Retrieve top k matching entries.\n :param score_function: Funtion for computing scores. By default, cosine similarity.\n :return: Returns a list with one entry for each query. Each entry is a list of dictionaries with the keys\n 'corpus_id' and 'score', sorted by decreasing cosine similarity scores.\n \"\"\"\n\n if isinstance(query_embeddings, (np.ndarray, np.generic)):\n query_embeddings = torch.from_numpy(query_embeddings)\n elif isinstance(query_embeddings, list):\n query_embeddings = torch.stack(query_embeddings)\n\n if len(query_embeddings.shape) == 1:\n query_embeddings = query_embeddings.unsqueeze(0)\n\n if isinstance(corpus_embeddings, (np.ndarray, np.generic)):\n corpus_embeddings = torch.from_numpy(corpus_embeddings)\n elif isinstance(corpus_embeddings, list):\n corpus_embeddings = torch.stack(corpus_embeddings)\n\n # Check that corpus and queries are on the same device\n if corpus_embeddings.device != query_embeddings.device:\n query_embeddings = query_embeddings.to(corpus_embeddings.device)\n\n queries_result_list = [[] for _ in range(len(query_embeddings))]\n\n for query_start_idx in range(0, len(query_embeddings), query_chunk_size):\n # Iterate over chunks of the corpus\n for corpus_start_idx in range(0, len(corpus_embeddings), corpus_chunk_size):\n # Compute cosine similarity\n cos_scores = score_function(query_embeddings[query_start_idx:query_start_idx + query_chunk_size],\n corpus_embeddings[corpus_start_idx:corpus_start_idx + corpus_chunk_size])\n\n # Get top-k scores\n cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(\n cos_scores, min(top_k, len(cos_scores[0])), dim=1, largest=True, sorted=False)\n cos_scores_top_k_values = cos_scores_top_k_values.cpu().tolist()\n cos_scores_top_k_idx = cos_scores_top_k_idx.cpu().tolist()\n\n for query_itr in range(len(cos_scores)):\n for sub_corpus_id, score in zip(cos_scores_top_k_idx[query_itr], cos_scores_top_k_values[query_itr]):\n corpus_id = corpus_start_idx + sub_corpus_id\n query_id = query_start_idx + query_itr\n if len(queries_result_list[query_id]) < top_k:\n heapq.heappush(queries_result_list[query_id], (\n score, corpus_id)) # heaqp tracks the quantity of the first element in the tuple\n else:\n heapq.heappushpop(queries_result_list[query_id], (score, corpus_id))\n\n # change the data format and sort\n for query_id in range(len(queries_result_list)):\n for doc_itr in range(len(queries_result_list[query_id])):\n score, corpus_id = queries_result_list[query_id][doc_itr]\n queries_result_list[query_id][doc_itr] = {'corpus_id': corpus_id, 'score': score}\n queries_result_list[query_id] = sorted(queries_result_list[query_id], key=lambda x: x['score'],\n 
reverse=True)\n\n return queries_result_list" } ]
from loguru import logger from typing import List, Union, Dict from bert4torch.pipelines import Text2Vec from bert4vector.base import Base from bert4vector.utils import cos_sim, dot_score, semantic_search from sentence_transformers import SentenceTransformer import numpy as np import json
2,118
class BertVector(Base): def __init__(self, model_path, corpus: Union[List[str], Dict[str, str]] = None, **model_config): """ Initialize the similarity object. :param checkpoint_path: 模型权重地址 :param config_path: 权重的config地址 :param corpus: Corpus of documents to use for similarity queries. :param device: Device (like 'cuda' / 'cpu') to use for the computation. """ self.model = self.build_model(model_path, **model_config)
self.score_functions = {'cos_sim': cos_sim, 'dot': dot_score}
2
2023-12-25 01:18:52+00:00
4k
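The chunked retrieval in `semantic_search` keeps only the running top-k hits per query with a heap; a standalone sketch of that `heapq.heappush` / `heappushpop` pattern (the scores are illustrative):

import heapq

def stream_topk(scores, k):
    # keep the k highest (score, idx) pairs while streaming over chunks,
    # mirroring how semantic_search bounds memory per query
    heap = []
    for idx, score in enumerate(scores):
        if len(heap) < k:
            heapq.heappush(heap, (score, idx))
        else:
            heapq.heappushpop(heap, (score, idx))
    return sorted(heap, reverse=True)

print(stream_topk([0.2, 0.9, 0.5, 0.7], k=2))  # -> [(0.9, 1), (0.7, 3)]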
SamsungLabs/ShellRecontruction
shell/models/shell_reconstructor_model.py
[ { "identifier": "UNet", "path": "shell/models/utils.py", "snippet": "class UNet(torch.nn.Module):\n def __init__(\n self,\n in_channels=1,\n out_channels=2,\n depth=5,\n wf=6,\n padding=True,\n normalization=None,\n up_mode='upconv',\n use_skip=True,\n fm_cap=2**10,\n legacy=False,\n double_pool=False\n\n ):\n \"\"\"\n Using the default arguments will yield the exact version used\n in the original paper\n\n Args:\n in_channels (int): number of input channels\n depth (int): depth of the network\n wf (int): number of filters in the first layer is 2**wf\n padding (bool): if True, apply padding such that the input shape\n is the same as the output.\n This may introduce artifacts\n up_mode (str): one of 'upconv' or 'upsample'.\n 'upconv' will use transposed convolutions for\n learned upsampling.\n 'upsample' will use bilinear upsampling.\n \"\"\"\n super().__init__()\n assert up_mode in ('upconv', 'upsample')\n self.padding = padding\n self.depth = depth\n self.legacy = legacy\n self.use_skip = use_skip\n self.fm_cap = fm_cap\n self.double_pool = double_pool\n final_out_channels = out_channels\n prev_channels = in_channels\n\n self.down_path = torch.nn.ModuleList()\n for i in range(depth):\n in_channels = prev_channels\n out_channels = 2 ** (wf + i)\n out_channels = min(self.fm_cap, out_channels)\n\n #print (i, in_channels, out_channels)\n self.down_path.append(\n UNetConvBlock(in_channels, out_channels, padding, normalization)\n )\n prev_channels = out_channels\n\n self.up_path = torch.nn.ModuleList()\n\n #print (\"up\")\n for i in reversed(range(depth - 1)):\n in_channels = prev_channels\n\n bridge_channels = 2 ** (wf + i)\n bridge_channels = min(self.fm_cap, bridge_channels)\n\n out_channels = 2 ** (wf + i)\n out_channels = min(self.fm_cap, out_channels)\n #print (i, in_channels, bridge_channels, out_channels)\n self.up_path.append(\n UNetUpBlock(in_channels, bridge_channels, out_channels, up_mode, padding, normalization, use_skip=self.use_skip,\n legacy=self.legacy, double_pool=self.double_pool)\n )\n prev_channels = out_channels\n\n self.last = torch.nn.Conv2d(prev_channels, final_out_channels, kernel_size=1)\n\n def forward(self, x):\n blocks = []\n for i, down in enumerate(self.down_path):\n x = down(x)\n if i != len(self.down_path) - 1:\n blocks.append(x)\n x = torch.nn.functional.max_pool2d(x, 2)\n if self.double_pool:\n x = torch.nn.functional.max_pool2d(x, 2)\n\n\n for i, up in enumerate(self.up_path):\n x = up(x, blocks[-i - 1])\n\n return self.last(x)" }, { "identifier": "misc", "path": "shell/utils/misc.py", "snippet": "def depth_image_to_pointcloud3d(\n depth_image: np.ndarray,\n camera_k: np.ndarray,\n mask: np.ndarray = None,\n subsample: int = 1,\n):\ndef depth_coords_to_camera_points(\n coords: np.ndarray, z_vals: np.ndarray, camera_k: np.ndarray\n):\ndef flatten(l):\ndef get_pcd(obj):\ndef interpolate(img, res):\ndef persp2ortho(\n persp_depth,\n px_size,\n camera_k=None,\n view_angle=90,\n min_z_step=1e-2,\n upsample_factor=4,\n):\ndef perspective2orthogonal(\n persp_depth,\n px_size,\n camera_k=None,\n view_angle=90,\n min_z_step=1e-2,\n upsample_factor=4,\n):\ndef clean_up_visible_orth_depth(multilayer_depth, th=16):\ndef get_camera_k_from_angle(img_res, angle):\ndef adjust_camera_k_by_res(camera_k, original_res, new_res):\ndef get_visible_pcd(depth, camera_k, return_type=\"torch\"):\ndef get_np(t):\ndef permute(npt, order):\ndef crop(t, cx, cy):\ndef rolling_window(a, shape): # rolling window for 2D array\ndef get_indexed_ortho_shell_points(ortho_shell, px_size):\ndef 
get_ortho_layer_indexed_points(ortho_layer, px_size):\ndef crop_to_nonzero_data(img_stack, pad=1):\ndef get_ortho_shell_mesh(\n ortho_shell, px_size, speedcrop=True\n) -> o3d.geometry.TriangleMesh:\ndef persp2ortho_shell(persp_shell, px_size, camera_k):\ndef get_persp_shell_mesh(persp_shell, px_size, camera_k):\ndef get_device() -> torch.device:\ndef camera_k_and_shape_to_intrinsic(shape, camera_k):\ndef cropped_o3dpc(\n o3dpcd,\n bbox_minlims: List[float] = None, # len 3\n bbox_maxlims: List[float] = None, # len 3\n):\ndef o3d_pointcloud_native(\n depth_img: np.ndarray,\n camera_k: np.ndarray,\n extrinsic: np.ndarray = None,\n scale_factor: float = 1000.0,\n depth_trunc: float = 1000.0,\n bbox_minlims: List[float] = None,\n bbox_maxlims: List[float] = None,\n):\ndef camera_points_to_depth_coords(\n pc: np.ndarray, camera_k: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\ndef pointcloud3d_to_depth_image(\n pc: np.ndarray, img_width: int, img_height: int, camera_k: np.ndarray\n) -> np.ndarray:\ndef get_masked_depth(\n object_id: int, mask: np.ndarray, depth: np.ndarray,\n) -> np.ndarray:" } ]
import torch import numpy as np from shell.models.utils import UNet from shell.utils import misc
1,754
""" Copyright (c) 2023 Samsung Electronics Co., Ltd. Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0/ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. For conditions of distribution and use, see the accompanying LICENSE file. """ class ShellReconstructorModel(torch.nn.Module): def __init__( self, device=None, ): super().__init__() self.height = 4 self.max_fms = 256 self.wf = 6 self.xy_size = 1.2 self.in_channels = 3 self.exit_only = False self.mask_channel = 1 self.depth_channel = 0 self.device = device
""" Copyright (c) 2023 Samsung Electronics Co., Ltd. Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0/ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. For conditions of distribution and use, see the accompanying LICENSE file. """ class ShellReconstructorModel(torch.nn.Module): def __init__( self, device=None, ): super().__init__() self.height = 4 self.max_fms = 256 self.wf = 6 self.xy_size = 1.2 self.in_channels = 3 self.exit_only = False self.mask_channel = 1 self.depth_channel = 0 self.device = device
self.unet = UNet(
0
2023-12-22 06:25:27+00:00
4k
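A quick check of the encoder channel schedule implied by the `UNet` snippet's `min(fm_cap, 2 ** (wf + i))` rule, plugged with this record's stated hyperparameters (`wf=6`, height 4; reading `max_fms` as `fm_cap` is an assumption):

wf, depth, fm_cap = 6, 4, 256  # treating max_fms as fm_cap is an assumption
down_channels = [min(fm_cap, 2 ** (wf + i)) for i in range(depth)]
print(down_channels)  # -> [64, 128, 256, 256]; growth is capped at fm_cap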
SAITPublic/BiRF
test.py
[ { "identifier": "NGPradianceField", "path": "lib/models/ngp.py", "snippet": "class NGPradianceField(torch.nn.Module):\n def __init__(\n self,\n aabb: Union[torch.Tensor, List[float]],\n num_dim: int = 3,\n use_viewdirs: bool = True,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n geo_feat_dim: int = 15,\n max_deg: int = 2,\n n_features_per_level: int = 2,\n ) -> None:\n super().__init__()\n if not isinstance(aabb, torch.Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.num_dim = num_dim\n self.use_viewdirs = use_viewdirs\n self.density_activation = density_activation\n self.geo_feat_dim = geo_feat_dim\n\n if self.use_viewdirs:\n self.direction_encoding = tcnn.Encoding(\n n_input_dims=num_dim,\n encoding_config={\n \"otype\": \"Composite\",\n \"nested\": [\n {\n \"n_dims_to_encode\": 3,\n \"otype\": \"SphericalHarmonics\",\n \"degree\": 4,\n },\n ],\n },\n )\n self.mlp_base = NetworkWithInputEncoding(\n n_input_dims=num_dim,\n n_output_dims=1 + self.geo_feat_dim,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": 128,\n \"n_hidden_layers\": 1,\n },\n n_features_per_level=n_features_per_level,\n # log2_hashmap_size=19,\n )\n if self.geo_feat_dim > 0:\n self.mlp_head = tcnn.Network(\n n_input_dims=(\n (\n self.direction_encoding.n_output_dims\n if self.use_viewdirs\n else 0\n )\n + self.geo_feat_dim\n ),\n n_output_dims=3,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"Sigmoid\",\n \"n_neurons\": 128,\n \"n_hidden_layers\": 2,\n },\n )\n\n def query_density(self, x, return_feat: bool = False):\n aabb_min, aabb_max = torch.split(self.aabb, self.num_dim, dim=-1)\n x = (x - aabb_min) / (aabb_max - aabb_min)\n selector = ((x > 0.0) & (x < 1.0)).all(dim=-1)\n x = (\n self.mlp_base(x.view(-1, self.num_dim))\n .view(list(x.shape[:-1]) + [1 + self.geo_feat_dim])\n .to(x)\n )\n density_before_activation, base_mlp_out = torch.split(\n x, [1, self.geo_feat_dim], dim=-1\n )\n density = (\n self.density_activation(density_before_activation)\n * selector[..., None]\n )\n if return_feat:\n return density, base_mlp_out\n else:\n return density\n\n def _query_rgb(self, x, dir, embedding):\n aabb_min, aabb_max = torch.split(self.aabb, self.num_dim, dim=-1)\n x = (x - aabb_min) / (aabb_max - aabb_min)\n \n # tcnn requires directions in the range [0, 1]\n if self.use_viewdirs:\n dir = (dir + 1.0) / 2.0\n d = self.direction_encoding(dir.view(-1, dir.shape[-1]))\n h = torch.cat([d, embedding.view(-1, self.geo_feat_dim)], dim=-1)\n else:\n h = embedding.view(-1, self.geo_feat_dim)\n \n rgb = (\n self.mlp_head(h)\n .view(list(embedding.shape[:-1]) + [3])\n .to(embedding)\n )\n return rgb\n\n def forward(\n self,\n positions: torch.Tensor,\n directions: torch.Tensor = None,\n ):\n if self.use_viewdirs and (directions is not None):\n assert (\n positions.shape == directions.shape\n ), f\"{positions.shape} v.s. 
{directions.shape}\"\n density, embedding = self.query_density(positions, return_feat=True)\n self.sparsity = torch.log(1.0 + density ** 2 / 0.5).mean()\n rgb = self._query_rgb(positions, directions, embedding=embedding)\n return rgb, density" }, { "identifier": "render_image", "path": "lib/utils.py", "snippet": "def render_image(\n # scene\n radiance_field: torch.nn.Module,\n occupancy_grid: OccupancyGrid,\n rays: Rays,\n scene_aabb: torch.Tensor,\n # rendering options\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n render_step_size: float = 1e-3,\n render_bkgd: Optional[torch.Tensor] = None,\n cone_angle: float = 0.0,\n alpha_thre: float = 0.0,\n # test options\n test_chunk_size: int = 8192,\n):\n \"\"\"Render the pixels of an image.\"\"\"\n rays_shape = rays.origins.shape\n if len(rays_shape) == 3:\n height, width, _ = rays_shape\n num_rays = height * width\n rays = namedtuple_map(\n lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays\n )\n else:\n num_rays, _ = rays_shape\n\n def sigma_fn(t_starts, t_ends, ray_indices):\n t_origins = chunk_rays.origins[ray_indices]\n t_dirs = chunk_rays.viewdirs[ray_indices]\n positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0\n return radiance_field.query_density(positions)\n\n def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n t_origins = chunk_rays.origins[ray_indices]\n t_dirs = chunk_rays.viewdirs[ray_indices]\n positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0\n return radiance_field(positions, t_dirs)\n\n results = []\n chunk = (\n torch.iinfo(torch.int32).max\n if radiance_field.training\n else test_chunk_size\n )\n for i in range(0, num_rays, chunk):\n chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays)\n ray_indices, t_starts, t_ends = ray_marching(\n chunk_rays.origins,\n chunk_rays.viewdirs,\n scene_aabb=scene_aabb,\n grid=occupancy_grid,\n sigma_fn=sigma_fn,\n near_plane=near_plane,\n far_plane=far_plane,\n render_step_size=render_step_size,\n stratified=radiance_field.training,\n cone_angle=cone_angle,\n alpha_thre=alpha_thre,\n )\n rgb, opacity, depth = rendering(\n t_starts,\n t_ends,\n ray_indices,\n n_rays=chunk_rays.origins.shape[0],\n rgb_sigma_fn=rgb_sigma_fn,\n render_bkgd=render_bkgd,\n )\n chunk_results = [rgb, opacity, depth, len(t_starts)]\n results.append(chunk_results)\n colors, opacities, depths, n_rendering_samples = [\n torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r\n for r in zip(*results)\n ]\n return (\n colors.view((*rays_shape[:-1], -1)),\n opacities.view((*rays_shape[:-1], -1)),\n depths.view((*rays_shape[:-1], -1)),\n sum(n_rendering_samples),\n )" }, { "identifier": "set_random_seed", "path": "lib/utils.py", "snippet": "def set_random_seed(seed, deterministic=False):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "load_dataset", "path": "lib/utils.py", "snippet": "def load_dataset(\n scene: str,\n data_root_fp: str,\n split: str,\n num_rays: Optional[int],\n dataset_kwargs: Dict,\n device: str,\n):\n if scene in [\"chair\", \"drums\", \"ficus\", \"hotdog\", \"lego\", \"materials\", \"mic\", \"ship\"]:\n from lib.datasets.nerf_synthetic import SubjectLoader\n data_root_fp = 'data/nerf_synthetic/'\n elif scene in [\"Bike\", \"Lifestyle\", \"Palace\", \"Robot\", \"Spaceship\", \"Steamtrain\", \"Toad\", \"Wineholder\"]:\n from lib.datasets.nsvf import 
SubjectLoader\n data_root_fp = 'data/Synthetic_NSVF/'\n elif scene in [\"Barn\", \"Caterpillar\", \"Family\", \"Ignatius\", \"Truck\"]:\n from lib.datasets.tanksandtemple import SubjectLoader\n data_root_fp = 'data/TanksAndTemple/'\n\n dataset = SubjectLoader(\n subject_id=scene,\n root_fp=data_root_fp,\n split=split,\n num_rays=num_rays,\n **dataset_kwargs,\n )\n\n dataset.images = dataset.images.to(device)\n dataset.camtoworlds = dataset.camtoworlds.to(device)\n dataset.K = dataset.K.to(device)\n\n return dataset, data_root_fp" }, { "identifier": "load_occgrid", "path": "lib/utils.py", "snippet": "def load_occgrid(occupancy_grid, save_path, device, res=128):\n data = np.load(f\"{save_path}/occgrid.npz\")['data']\n binary = np.unpackbits(data).reshape(res, res, res)\n binary = torch.tensor(binary).type(torch.bool).to(device)\n occupancy_grid._binary = binary\n \n return occupancy_grid" }, { "identifier": "load_model", "path": "lib/utils.py", "snippet": "def load_model(radiance_field, save_path, device):\n radiance_field.load_state_dict(torch.load(f\"{save_path}/network.ckpt\"), strict=False)\n encoding_params = np.load(f\"{save_path}/encoding.npz\")\n \n params_keys = [key for key in radiance_field.state_dict().keys() if key.startswith('mlp_base.encoding')]\n model_params = {}\n for key in params_keys:\n num = radiance_field.state_dict()[key].shape[0]\n params = np.unpackbits(encoding_params[key]).astype(np.float16)[:num]\n model_params[key] = torch.tensor(2 * params - 1).to(device)\n\n radiance_field.load_state_dict(model_params, strict=False)\n\n return radiance_field" } ]
import argparse import math import os import time import json import gin import imageio import numpy as np import torch import torch.nn.functional as F import tqdm from typing import * from datetime import datetime from torchmetrics import StructuralSimilarityIndexMeasure from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity from lib.models.ngp import NGPradianceField from lib.utils import render_image, set_random_seed, load_dataset, load_occgrid, load_model from nerfacc import ContractionType, OccupancyGrid
3,562
class ExtendAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] items.extend(values) setattr(namespace, self.dest, items) def parse_args(): parser = argparse.ArgumentParser() parser.register('action', 'extend', ExtendAction) parser.add_argument( "configs", action="append", help="path to config files", ) parser.add_argument( "--bind", nargs='+', action="extend", help="param to bind", ) parser.add_argument( "--scene", type=str, required=True, choices=[ # nerf synthetic "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", # nsvf synthetic "Bike", "Lifestyle", "Palace", "Robot", "Spaceship", "Steamtrain", "Toad", "Wineholder", # nsvf TankAndTemple "Barn", "Caterpillar", "Family", "Ignatius", "Truck", ], help="which scene to use", ) parser.add_argument( "--n_features", type=int, default=2, help="number of features" ) parser.add_argument( "--seed", type=int, default=0, help="random seed number" ) parser.add_argument( "--ckpt_dir", type=str, default=None, help="path for checkpoint directory" ) return parser.parse_args() @gin.configurable def main( scene: str, ckpt_dir: str, n_features: int=2, seed: int = 2023, log_dir: str = "./logs", prefix: Optional[str] = None, postfix: Optional[str] = None, max_steps: int = 20000, render_n_samples: int = 1024, test_chunk_size: int = 16384, aabb: List[float] = [-1.5, -1.5, -1.5, 1.5, 1.5, 1.5], data_root_fp: str = "data/nerf_synthetic/", train_split: str = "train", cone_angle: float = 0.0, sparsity_weight: float = 2e-5, render_per_frame: int = -1, ): # log save_path = f"{log_dir}/{scene}" if ckpt_dir == None else ckpt_dir if prefix is not None: save_path = f"{prefix}_{save_path}" if postfix is not None: save_path = f"{save_path}_{postfix}" save_path = f"{save_path}_{n_features}" print(f'Evaluation for pretrained model in "{save_path}"') results = {} # setup the dataset test_dataset_kwargs = {} target_sample_batch_size = 1 << 18 grid_resolution = 128
""" "Copyright (C) 2021 Samsung Electronics Co. LTD This software is a property of Samsung Electronics. No part of this software, either material or conceptual may be copied or distributed, transmitted, transcribed, stored in a retrieval system, or translated into any human or computer language in any form by any means, electronic, mechanical, manual or otherwise, or disclosed to third parties without the express written permission of Samsung Electronics. (Use of the Software is restricted to non-commercial, personal or academic, research purpose only)" """ """ Modified from NerfAcc (https://github.com/KAIR-BAIR/nerfacc) Copyright (c) 2022 Ruilong Li, UC Berkeley. """ class ExtendAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] items.extend(values) setattr(namespace, self.dest, items) def parse_args(): parser = argparse.ArgumentParser() parser.register('action', 'extend', ExtendAction) parser.add_argument( "configs", action="append", help="path to config files", ) parser.add_argument( "--bind", nargs='+', action="extend", help="param to bind", ) parser.add_argument( "--scene", type=str, required=True, choices=[ # nerf synthetic "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", # nsvf synthetic "Bike", "Lifestyle", "Palace", "Robot", "Spaceship", "Steamtrain", "Toad", "Wineholder", # nsvf TankAndTemple "Barn", "Caterpillar", "Family", "Ignatius", "Truck", ], help="which scene to use", ) parser.add_argument( "--n_features", type=int, default=2, help="number of features" ) parser.add_argument( "--seed", type=int, default=0, help="random seed number" ) parser.add_argument( "--ckpt_dir", type=str, default=None, help="path for checkpoint directory" ) return parser.parse_args() @gin.configurable def main( scene: str, ckpt_dir: str, n_features: int=2, seed: int = 2023, log_dir: str = "./logs", prefix: Optional[str] = None, postfix: Optional[str] = None, max_steps: int = 20000, render_n_samples: int = 1024, test_chunk_size: int = 16384, aabb: List[float] = [-1.5, -1.5, -1.5, 1.5, 1.5, 1.5], data_root_fp: str = "data/nerf_synthetic/", train_split: str = "train", cone_angle: float = 0.0, sparsity_weight: float = 2e-5, render_per_frame: int = -1, ): # log save_path = f"{log_dir}/{scene}" if ckpt_dir == None else ckpt_dir if prefix is not None: save_path = f"{prefix}_{save_path}" if postfix is not None: save_path = f"{save_path}_{postfix}" save_path = f"{save_path}_{n_features}" print(f'Evaluation for pretrained model in "{save_path}"') results = {} # setup the dataset test_dataset_kwargs = {} target_sample_batch_size = 1 << 18 grid_resolution = 128
test_dataset, data_root_fp = load_dataset(
3
2023-12-28 02:08:29+00:00
4k
pkariz/grin-explorer
backend/api/models.py
[ { "identifier": "NodeV2API", "path": "backend/api/node.py", "snippet": "class NodeV2API:\n def __init__(self, node):\n self.foreign_api_url = node.api_url\n self.foreign_api_user = node.api_username\n self.foreign_api_password = node.api_password\n self._cached_blocks = {}\n \n\n def post(self, method, params):\n payload = {\n 'jsonrpc': '2.0',\n 'id': 1,\n 'method': method,\n 'params': params\n }\n\n response = requests.post(\n self.foreign_api_url,\n json=payload, \n auth=(self.foreign_api_user, self.foreign_api_password),\n # long read timeout because of node's compaction process\n timeout=(5, 60)\n )\n\n if response.status_code >= 300 or response.status_code < 200:\n # Requests-level error\n raise NodeError(\n method, params, response.status_code, response.reason)\n response_json = response.json()\n\n # https://github.com/mimblewimble/grin-rfcs/blob/master/text/0007-node-api-v2.md#errors\n if \"error\" in response_json:\n # One version of a node error\n raise NodeError(\n method, params,\n response_json[\"error\"][\"code\"],\n response_json[\"error\"][\"message\"]\n )\n if \"Err\" in response_json:\n # Another version of a node error\n raise NodeError(\n method, params, None, response_json[\"result\"][\"Err\"])\n return response_json\n\n def get_tip(self):\n resp = self.post('get_tip', [])\n return resp[\"result\"][\"Ok\"]\n \n def get_kernel(self, excess, min_height=None, max_height=None):\n resp = self.post('get_kernel', [excess, min_height, max_height])\n return resp[\"result\"][\"Ok\"]\n\n def get_header(self, height=None, hash=None, commit=None):\n resp = self.post('get_header', [height, hash, commit])\n return resp[\"result\"][\"Ok\"]\n\n def get_block(self, height=None, hash=None, commit=None):\n resp = self.post('get_block', [height, hash, commit])\n res = resp['result']\n try:\n return resp[\"result\"][\"Ok\"]\n except KeyError:\n if 'Err' in resp['result'] and resp['result']['Err'] == 'NotFound':\n logger.warning(\n 'NodeBlockNotFoundException',\n extra={ 'height': height, 'hash': hash },\n )\n raise NodeBlockNotFoundException()\n log_data = json.dumps(resp)\n logger.error('NodeUnknownException', extra={ 'result': log_data })\n raise NodeUnknownException()\n\n def get_blocks(self, start_height, end_height, limit=1000, proofs=True):\n if start_height < 0:\n raise Exception('Starting height must >= 0.')\n if not 1 <= limit <= 1000:\n raise Exception('Limit must be between 1 and 1000.')\n resp = self.post('get_blocks', [start_height, end_height, limit, proofs])\n res = resp['result']\n try:\n return resp[\"result\"][\"Ok\"]\n except KeyError:\n if 'Err' in resp['result'] and resp['result']['Err'] == 'NotFound':\n logger.warning(\n 'NodeBlocksFetchException',\n extra={ 'start_height': start_height, 'end_height': end_height },\n )\n raise NodeBlocksFetchException()\n log_data = json.dumps(resp)\n logger.error('NodeUnknownException', extra={ 'result': log_data })\n raise NodeUnknownException()" }, { "identifier": "NodeError", "path": "backend/api/node.py", "snippet": "class NodeError(Exception):\n def __init__(self, method, params, code, reason):\n self.method = method\n self.params = params\n self.code = code # may be None, not all errors have a code\n self.reason = reason\n super().__init__(self.reason)\n\n def __str__(self):\n return (\n f'Calling node foreign api {self.method} with params {self.params} '\n f'failed with error code {self.code} because: {self.reason}'\n )" } ]
from asgiref.sync import async_to_sync from channels.layers import get_channel_layer from django.conf import settings from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.contrib.postgres.fields import ArrayField from django.core.validators import ( MinLengthValidator, MinValueValidator, MaxValueValidator, ) from django.db import models, transaction from django.db.models import Q from model_utils.models import TimeStampedModel from slugify import slugify from requests.exceptions import ( Timeout as RequestsTimeout, ConnectionError as RequestsConnectionError, HTTPError as RequestsHTTPError, ReadTimeout as RequestsReadTimeout ) from .node import NodeV2API, NodeError from .bootstrap import load_blocks from .models import Block, BlockHeader, Input, Output, Kernel, DramatiqTask, Reorg from django.contrib.contenttypes.models import ContentType from decimal import Decimal from .serializers import DramatiqTaskSerializer import logging
2,106
logger = logging.getLogger(__name__) class NodeGroup(models.Model): """ NodeGroup represents a group of nodes. These nodes should be on the same network.: """ id = models.BigAutoField(primary_key=True) # name is probably mainnet, testnet or smth similar name = models.CharField(max_length=255, unique=True) # by default that's slug of the name slug = models.SlugField(max_length=255, unique=True) def __str__(self): return self.name def save(self, *args, **kwargs): if not self.slug: self.slug = slugify(self.name, to_lower=True) else: self.slug = self.slug.lower() self.full_clean() return super().save(*args, **kwargs) class Node(TimeStampedModel): """Node on the network. Currently it only supports grin-rust.""" id = models.BigAutoField(primary_key=True) # name can be whatever name = models.CharField(max_length=255, unique=True) # by default that's slug of the name slug = models.SlugField(max_length=255, unique=True) group = models.ForeignKey( NodeGroup, related_name='nodes', on_delete=models.PROTECT) # foreign api url of the grin-rust node api_url = models.URLField() # username of the grin-rust node api_username = models.CharField(max_length=255) # foreign api secret of the grin-rust node api_password = models.CharField(max_length=255) # if archive is true then we fetch every block when we bootstrap, otherwise # we fetch only latest 1440 blocks (1 day) archive = models.BooleanField(default=False) def __str__(self): repr = f'{self.name}' if self.archive: repr += ' (archive)' return repr def save(self, *args, **kwargs): if not self.slug: self.slug = slugify(self.name, to_lower=True) else: self.slug = self.slug.lower() return super().save(*args, **kwargs) def is_reachable(self): try: NodeV2API(self).get_tip() return True except ( RequestsConnectionError, RequestsTimeout, RequestsHTTPError, RequestsReadTimeout ): logger.exception('Node unreachable', extra={'node': self.slug}) return False class Blockchain(TimeStampedModel): id = models.BigAutoField(primary_key=True) # testnet, mainnet etc name = models.CharField(max_length=255, unique=True) # slug of the name, we use it in url slug = models.SlugField(max_length=255, unique=True) # node from which the data is fetched node = models.ForeignKey( Node, related_name='blockchains', on_delete=models.PROTECT) # the default blockchain will be picked on the gui by default default = models.BooleanField(default=False) # if fetch_price is False then the shown price will always be 0. # Testnets and localnets should have this set to false. fetch_price = models.BooleanField(default=True) # load_progress shows current % of loaded blocks. If archive is True then # load_progress will represent % of missing all blocks, otherwise % of # missing blocks from the latest 1440 blocks load_progress = models.DecimalField( max_digits=5, decimal_places=2, default=0.0, validators=[MinValueValidator(0), MaxValueValidator(100)] ) def __str__(self): return f'{self.name} - {self.load_progress} [Node<{self.node}>]' def bootstrap(self, skip_reorg_check=False): # import here to avoid cyclic import start_height, end_height = self.get_bootstrap_heights() load_blocks(self, start_height, end_height, skip_reorg_check) def get_tip_height(self): node_api = NodeV2API(self.node) try: end_block = node_api.get_tip()['height']
except NodeError as e:
1
2023-12-24 22:15:11+00:00
4k
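`NodeV2API.post` talks JSON-RPC v2 to the node's foreign API; a self-contained sketch of the `get_tip` payload it assembles (the node URL in the comment is a placeholder, not taken from this record):

import json

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "get_tip",
    "params": [],
}
print(json.dumps(payload))
# sent with HTTP basic auth via requests.post(api_url, json=payload, ...);
# a foreign-api URL such as http://127.0.0.1:3413/v2/foreign is a
# placeholder example, not a value from this record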
Rubics-Xuan/Med-DANet
utils/predict.py
[ { "identifier": "slide_window_2D_only_output", "path": "utils/slide_test.py", "snippet": "def slide_window_2D_only_output(ori_img, crop_size, model):\n\n stride_rate = 1.0/3.0\n stride = int(crop_size * stride_rate) # default = 85\n batch, classes, origin_h, origin_w = ori_img.size()\n\n with torch.cuda.device_of(ori_img):\n outputs = ori_img.new().resize_(batch, classes, origin_h, origin_w).zero_().cuda()\n count_norm = ori_img.new().resize_(batch, 1, origin_h, origin_w).zero_().cuda()\n\n h_grids = int(math.ceil(1.0 * (origin_h - crop_size) / stride)) + 1\n w_grids = int(math.ceil(1.0 * (origin_w - crop_size) / stride)) + 1\n\n for idh in range(h_grids): # 3\n for idw in range(w_grids):\n h0 = idh * stride\n w0 = idw * stride\n h1 = min(h0 + crop_size, origin_h)\n w1 = min(w0 + crop_size, origin_w)\n\n #adjustment\n if h1 == origin_h:\n h0 = h1 - crop_size\n if w1 == origin_w:\n w0 = w1 - crop_size\n\n crop_img = crop_image_2D(ori_img, h0, h1, w0, w1).cuda()\n output = model_inference_only_output(model, crop_img).cuda()\n outputs[:, :, h0:h1, w0:w1] += crop_image_2D(output, 0, h1 - h0, 0, w1 - w0)\n count_norm[:, :, h0:h1, w0:w1] += 1\n assert ((count_norm == 0).sum() == 0)\n outputs = outputs / count_norm\n outputs = outputs[:, :, :origin_h, :origin_w]\n outputs = F.softmax(outputs, 1)\n return outputs" }, { "identifier": "slide_window_2D_out_gflops", "path": "utils/slide_test.py", "snippet": "def slide_window_2D_out_gflops(ori_img, crop_size, model):\n\n stride_rate = 1.0/3.0\n stride = int(crop_size * stride_rate) # default = 85\n batch, classes, origin_h, origin_w = ori_img.size()\n\n with torch.cuda.device_of(ori_img):\n outputs = ori_img.new().resize_(batch, classes, origin_h, origin_w).zero_().cuda()\n count_norm = ori_img.new().resize_(batch, 1, origin_h, origin_w).zero_().cuda()\n\n h_grids = int(math.ceil(1.0 * (origin_h - crop_size) / stride)) + 1\n w_grids = int(math.ceil(1.0 * (origin_w - crop_size) / stride)) + 1\n gflops_slice = 0\n\n for idh in range(h_grids): # 3\n for idw in range(w_grids):\n h0 = idh * stride\n w0 = idw * stride\n h1 = min(h0 + crop_size, origin_h)\n w1 = min(w0 + crop_size, origin_w)\n\n #adjustment\n if h1 == origin_h:\n h0 = h1 - crop_size\n if w1 == origin_w:\n w0 = w1 - crop_size\n\n crop_img = crop_image_2D(ori_img, h0, h1, w0, w1).cuda()\n\n output, gflops = model_inference_out_gflops(model, crop_img)\n output, gflops = output.cuda(), gflops.cuda()\n outputs[:, :, h0:h1, w0:w1] += crop_image_2D(output, 0, h1 - h0, 0, w1 - w0)\n count_norm[:, :, h0:h1, w0:w1] += 1\n gflops_slice += gflops\n assert ((count_norm == 0).sum() == 0)\n outputs = outputs / count_norm\n outputs = outputs[:, :, :origin_h, :origin_w]\n outputs = F.softmax(outputs, 1)\n return outputs, gflops_slice" } ]
import os import time import logging import torch import torch.nn.functional as F import torch.backends.cudnn as cudnn import numpy as np import cv2 import nibabel as nib import imageio import scipy.misc import SimpleITK as sitk from utils.slide_test import slide_window_2D_only_output, slide_window_2D_out_gflops
2,379
index_list = (ori == j).nonzero() for i in range(len(index_list)): batch, height, width, depth = index_list[i] new_gd[batch, j, height, width, depth] = 1 return new_gd.float() def tailor_and_concat(x, model): temp = [] temp.append(x[..., :128, :128, :128]) temp.append(x[..., :128, 112:240, :128]) temp.append(x[..., 112:240, :128, :128]) temp.append(x[..., 112:240, 112:240, :128]) temp.append(x[..., :128, :128, 27:155]) temp.append(x[..., :128, 112:240, 27:155]) temp.append(x[..., 112:240, :128, 27:155]) temp.append(x[..., 112:240, 112:240, 27:155]) y = x.clone() for i in range(len(temp)): # temp[i] = model(temp[i]) temp[i] = model(temp[i]) y[..., :128, :128, :128] = temp[0] y[..., :128, 128:240, :128] = temp[1][..., :, 16:128, :] y[..., 128:240, :128, :128] = temp[2][..., 16:128, :, :] y[..., 128:240, 128:240, :128] = temp[3][..., 16:128, 16:128, :] y[..., :128, :128, 128:155] = temp[4][..., 96:123] y[..., :128, 128:240, 128:155] = temp[5][..., :, 16:128, 96:123] y[..., 128:240, :128, 128:155] = temp[6][..., 16:128, :, 96:123] y[..., 128:240, 128:240, 128:155] = temp[7][..., 16:128, 16:128, 96:123] return y[..., :155] def dice_score(o, t, eps=1e-8): num = 2*(o*t).sum() + eps den = o.sum() + t.sum() + eps return num/den def mIOU(o, t, eps=1e-8): num = (o*t).sum() + eps den = (o | t).sum() + eps return num/den def softmax_mIOU_score(output, target): mIOU_score = [] mIOU_score.append(mIOU(o=(output==1),t=(target==1))) mIOU_score.append(mIOU(o=(output==2),t=(target==2))) mIOU_score.append(mIOU(o=(output==3),t=(target==4))) return mIOU_score def softmax_output_dice(output, target): ret = [] # whole o = output > 0; t = target > 0 # ce ret += dice_score(o, t), # core o = (output == 1) | (output == 3) t = (target == 1) | (target == 4) ret += dice_score(o, t), # active o = (output == 3);t = (target == 4) ret += dice_score(o, t), return ret keys = 'whole', 'core', 'enhancing', 'loss' def validate_softmax( valid_loader, model, heatmap_use=True, heatmap_dir='', savepath='', # when in validation set, you must specify the path to save the 'nii' segmentation results here names=None, # The names of the patients orderly! verbose=False, save_format=None, # ['nii','npy'], use 'nii' as default. Its purpose is for submission. snapshot=False, # for visualization. Default false. It is recommended to generate the visualized figures. visual='', # the path to save visualization postprocess=False, # Default False, when use postprocess, the score of dice_ET would be changed. valid_in_train=False, # if you are valid when train ): H, W, T = 240, 240, 160 model.eval() WT_LIST, TC_LIST, ET_LIST, flops_sample_list = [], [], [], [] runtimes = [] for i, data in enumerate(valid_loader): print('-------------------------------------------------------------------') msg = 'Subject {}/{}, '.format(i + 1, len(valid_loader)) if valid_in_train: target_cpu = data[1][0, :H, :W, :T].numpy() data = [t.cuda(non_blocking=True) for t in data] x, target = data[:2] else: x = data x.cuda() flops_sample = 0 torch.cuda.synchronize() # add the code synchronize() to correctly count the runtime. start_time = time.time() x = x[..., :155] output = x.clone().cpu().detach().numpy() print('start to predict segmentation!!') for s in range(155): x_s = x[..., s].cuda() x_origin = x_s
cudnn.benchmark = True def one_hot(ori, classes): batch, h, w, d = ori.size() new_gd = torch.zeros((batch, classes, h, w, d), dtype=ori.dtype).cuda() for j in range(classes): index_list = (ori == j).nonzero() for i in range(len(index_list)): batch, height, width, depth = index_list[i] new_gd[batch, j, height, width, depth] = 1 return new_gd.float() def tailor_and_concat(x, model): temp = [] temp.append(x[..., :128, :128, :128]) temp.append(x[..., :128, 112:240, :128]) temp.append(x[..., 112:240, :128, :128]) temp.append(x[..., 112:240, 112:240, :128]) temp.append(x[..., :128, :128, 27:155]) temp.append(x[..., :128, 112:240, 27:155]) temp.append(x[..., 112:240, :128, 27:155]) temp.append(x[..., 112:240, 112:240, 27:155]) y = x.clone() for i in range(len(temp)): # temp[i] = model(temp[i]) temp[i] = model(temp[i]) y[..., :128, :128, :128] = temp[0] y[..., :128, 128:240, :128] = temp[1][..., :, 16:128, :] y[..., 128:240, :128, :128] = temp[2][..., 16:128, :, :] y[..., 128:240, 128:240, :128] = temp[3][..., 16:128, 16:128, :] y[..., :128, :128, 128:155] = temp[4][..., 96:123] y[..., :128, 128:240, 128:155] = temp[5][..., :, 16:128, 96:123] y[..., 128:240, :128, 128:155] = temp[6][..., 16:128, :, 96:123] y[..., 128:240, 128:240, 128:155] = temp[7][..., 16:128, 16:128, 96:123] return y[..., :155] def dice_score(o, t, eps=1e-8): num = 2*(o*t).sum() + eps den = o.sum() + t.sum() + eps return num/den def mIOU(o, t, eps=1e-8): num = (o*t).sum() + eps den = (o | t).sum() + eps return num/den def softmax_mIOU_score(output, target): mIOU_score = [] mIOU_score.append(mIOU(o=(output==1),t=(target==1))) mIOU_score.append(mIOU(o=(output==2),t=(target==2))) mIOU_score.append(mIOU(o=(output==3),t=(target==4))) return mIOU_score def softmax_output_dice(output, target): ret = [] # whole o = output > 0; t = target > 0 # ce ret += dice_score(o, t), # core o = (output == 1) | (output == 3) t = (target == 1) | (target == 4) ret += dice_score(o, t), # active o = (output == 3);t = (target == 4) ret += dice_score(o, t), return ret keys = 'whole', 'core', 'enhancing', 'loss' def validate_softmax( valid_loader, model, heatmap_use=True, heatmap_dir='', savepath='', # when in validation set, you must specify the path to save the 'nii' segmentation results here names=None, # The names of the patients orderly! verbose=False, save_format=None, # ['nii','npy'], use 'nii' as default. Its purpose is for submission. snapshot=False, # for visualization. Default false. It is recommended to generate the visualized figures. visual='', # the path to save visualization postprocess=False, # Default False, when use postprocess, the score of dice_ET would be changed. valid_in_train=False, # if you are valid when train ): H, W, T = 240, 240, 160 model.eval() WT_LIST, TC_LIST, ET_LIST, flops_sample_list = [], [], [], [] runtimes = [] for i, data in enumerate(valid_loader): print('-------------------------------------------------------------------') msg = 'Subject {}/{}, '.format(i + 1, len(valid_loader)) if valid_in_train: target_cpu = data[1][0, :H, :W, :T].numpy() data = [t.cuda(non_blocking=True) for t in data] x, target = data[:2] else: x = data x.cuda() flops_sample = 0 torch.cuda.synchronize() # add the code synchronize() to correctly count the runtime. start_time = time.time() x = x[..., :155] output = x.clone().cpu().detach().numpy() print('start to predict segmentation!!') for s in range(155): x_s = x[..., s].cuda() x_origin = x_s
logit, gflops_slice = slide_window_2D_out_gflops(x_s, crop_size=128, model=model) # no flip
1
2023-12-28 07:26:55+00:00
4k
the-seeds/cardinal
src/cardinal/utils/builder.py
[ { "identifier": "BaseExtractor", "path": "src/cardinal/core/extractor/base_extractor.py", "snippet": "class BaseExtractor(Extractor):\n def __init__(\n self, vectorizer: \"EmbedOpenAI\", storage: \"StringKeyedStorage[Leaf]\", vectorstore: \"VectorStore[LeafIndex]\"\n ) -> None:\n self._vectorizer = vectorizer\n self._storage = storage\n self._vectorstore = vectorstore\n self._splitter = CJKTextSplitter()\n\n def load(self, input_files: List[Path], user_id: str, verbose: Optional[bool] = False) -> None:\n file_contents: List[str] = []\n for file_path in tqdm(input_files, desc=\"Extract content\", disable=(not verbose)):\n if file_path.suffix == \".txt\":\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n file_contents.append(f.read())\n else:\n raise NotImplementedError\n\n text_chunks = []\n with Pool(processes=int(os.environ.get(\"NUM_CPU_CORE\"))) as pool:\n for chunks in tqdm(\n pool.imap_unordered(self._splitter.split, file_contents),\n total=len(file_contents),\n desc=\"Split content\",\n disable=(not verbose),\n ):\n text_chunks.extend(chunks)\n\n leaf_indexes = []\n for chunk in tqdm(text_chunks, desc=\"Build index\", disable=(not verbose)):\n leaf_index = LeafIndex(user_id=user_id)\n leaf = Leaf(content=chunk, leaf_id=leaf_index.leaf_id, user_id=user_id)\n self._storage.insert(leaf.leaf_id, leaf)\n leaf_indexes.append(leaf_index)\n\n text_batches = []\n for i in range(0, len(text_chunks), self._vectorizer.batch_size):\n text_batches.append(text_chunks[i : i + self._vectorizer.batch_size])\n\n embeddings = []\n for batch_text in tqdm(text_batches, desc=\"Get embeddings\", disable=(not verbose)):\n embeddings.extend(self._vectorizer.batch_embed(batch_text))\n\n self._vectorstore.insert(embeddings, leaf_indexes)" }, { "identifier": "get_logger", "path": "src/cardinal/core/logging/logger.py", "snippet": "def get_logger(name: str) -> logging.Logger:\n r\"\"\"\n Gets a standard logger with a stream hander to stdout.\n \"\"\"\n formatter = logging.Formatter(\n fmt=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", datefmt=\"%m/%d/%Y %H:%M:%S\"\n )\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n logger.addHandler(handler)\n\n return logger" }, { "identifier": "EmbedOpenAI", "path": "src/cardinal/core/model/embed_openai.py", "snippet": "class EmbedOpenAI:\n def __init__(self, batch_size: Optional[int] = 1000) -> None:\n self.model = os.environ.get(\"EMBED_MODEL\")\n self.batch_size = batch_size\n self._client = OpenAI(max_retries=5, timeout=30.0)\n\n @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(5))\n def _get_embeddings(self, batch_text: List[str]) -> List[List[float]]:\n # replace newlines, which can negatively affect performance\n batch_text = [text.replace(\"\\n\", \" \") for text in batch_text]\n data = self._client.embeddings.create(input=batch_text, model=self.model).data\n return [d.embedding for d in data]\n\n def batch_embed(self, texts: List[str]) -> List[List[float]]:\n embeddings = []\n for i in range(0, len(texts), self.batch_size):\n embeddings.extend(self._get_embeddings(texts[i : i + self.batch_size]))\n return embeddings" }, { "identifier": "Leaf", "path": "src/cardinal/core/schema/leaf.py", "snippet": "class Leaf(LeafIndex):\n content: str" }, { "identifier": "LeafIndex", "path": "src/cardinal/core/schema/leaf.py", "snippet": "class LeafIndex(BaseModel):\n leaf_id: str = Field(default_factory=lambda: uuid.uuid4().hex)\n 
user_id: str" }, { "identifier": "RedisStorage", "path": "src/cardinal/core/storage/redis.py", "snippet": "class RedisStorage(StringKeyedStorage[V]):\n def __init__(self, name: str) -> None:\n self.name = name\n self.database = Redis.from_url(url=os.environ.get(\"REDIS_URI\"))\n self._unique_key = \"unique_{}\".format(name)\n\n try:\n self.database.ping()\n except redis.ConnectionError:\n raise Exception(\"Unable to connect with the Redis server.\")\n\n def insert(self, key: str, value: V) -> None:\n encoded_value = pickle.dumps(value)\n self.database.hset(self.name, key, encoded_value)\n\n def query(self, key: str) -> V:\n encoded_value = self.database.hget(self.name, key)\n return pickle.loads(encoded_value)\n\n def unique_incr(self) -> None:\n self.database.incr(self._unique_key)\n\n def unique_get(self) -> int:\n value = self.database.get(self._unique_key)\n if isinstance(value, bytes):\n return int(value.decode(\"utf-8\"))\n return 0\n\n def unique_reset(self) -> None:\n self.database.delete(self._unique_key)" }, { "identifier": "Chroma", "path": "src/cardinal/core/vectorstore/chroma.py", "snippet": "class Chroma(VectorStore[V]):\n def __init__(self, name: str) -> None:\n client = _get_chroma_client()\n self.store = client.get_or_create_collection(name, embedding_function=None)\n self._batch_size = 1000\n self._data_field = \"data\"\n\n @classmethod\n def create(cls, name: str, embeddings: List[K], data: List[V], drop_old: Optional[bool] = False) -> Self:\n if drop_old:\n client = _get_chroma_client()\n try:\n client.delete_collection(name)\n except Exception:\n pass\n\n chroma = cls(name=name)\n chroma.insert(embeddings, data)\n return chroma\n\n def insert(self, embeddings: List[K], data: List[V]) -> None:\n ids = []\n metadatas = []\n for example in data:\n ids.append(uuid.uuid4().hex)\n example_dict = {}\n for k, v in example.model_dump().items():\n if isinstance(v, (str, int, float, bool)):\n example_dict[k] = v\n example_dict[self._data_field] = base64.b64encode(pickle.dumps(example)).decode(\"ascii\")\n metadatas.append(example_dict)\n\n total_count = len(metadatas)\n for i in range(0, total_count, self._batch_size):\n self.store.add(\n ids=ids[i : i + self._batch_size],\n embeddings=embeddings[i : i + self._batch_size],\n metadatas=metadatas[i : i + self._batch_size],\n )\n\n def delete(self, condition: ChromaCondition) -> None:\n return self.store.delete(where=condition.to_filter())\n\n def search(\n self, embedding: K, top_k: Optional[int] = 4, condition: Optional[ChromaCondition] = None\n ) -> List[Tuple[V, float]]:\n result = self.store.query(\n query_embeddings=[embedding],\n n_results=top_k,\n where=condition.to_filter() if condition is not None else None,\n include=[\"metadatas\", \"distances\"],\n )\n\n ret = []\n for metadata, score in zip(result[\"metadatas\"][0], result[\"distances\"][0]):\n example = pickle.loads(base64.b64decode(metadata[self._data_field]))\n ret.append((example, score))\n return ret" } ]
import os
from pathlib import Path

from ..core.extractor import BaseExtractor
from ..core.logging import get_logger
from ..core.model import EmbedOpenAI
from ..core.schema import Leaf, LeafIndex
from ..core.storage import RedisStorage
from ..core.vectorstore import Chroma
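# A quick usage sketch of the RedisStorage wrapper quoted in the context
# above (assumes a reachable REDIS_URI in the environment; the store name
# and Leaf content are invented): values are pickled into a Redis hash
# keyed by the store name.
store = RedisStorage[Leaf](name="demo")
leaf = Leaf(content="Mentha is used for colds.", user_id="admin")
store.insert(leaf.leaf_id, leaf)
assert store.query(leaf.leaf_id).content == leaf.content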
1,954
logger = get_logger(__name__)


def build_database(folder: Path, database: str) -> None:
    input_files = []
    for path in folder.rglob("*.*"):
        if path.is_file() and path.suffix == ".txt":
            input_files.append(path)

    extractor = BaseExtractor(
        vectorizer=EmbedOpenAI(),
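# A hedged sketch of the full wiring build_database appears to be heading
# toward, based on the BaseExtractor signature in the context above and the
# storage argument visible in the next line of the source; the Chroma
# vectorstore argument and the user_id value are assumptions for illustration.
def build_database_sketch(folder: Path, database: str) -> None:
    input_files = [p for p in folder.rglob("*.txt") if p.is_file()]
    extractor = BaseExtractor(
        vectorizer=EmbedOpenAI(),
        storage=RedisStorage[Leaf](name=database),     # confirmed by the source's next line
        vectorstore=Chroma[LeafIndex](name=database),  # assumption: collection named after the database
    )
    extractor.load(input_files, user_id="admin", verbose=True)  # assumption: placeholder user_id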
storage=RedisStorage[Leaf](name=database),
3
2023-12-26 14:16:40+00:00
4k
datrocity/pond
tests/artifact/test_artifact_registry.py
[ { "identifier": "Artifact", "path": "pond/artifact/artifact.py", "snippet": "class Artifact(ABC):\n \"\"\" Knows how to read and write one type of artifact.\n\n Concrete Artifact implementation should save the metadata with the data if possible,\n so that the artifact is self-contained even if, for instance, it is sent by email.\n \"\"\"\n\n # --- Artifact class interface\n\n # todo: what is the class_id for?\n\n @classmethod\n def class_id(cls):\n \"\"\" String ID to be able to find this class from its name. \"\"\"\n return cls.__name__\n\n @classmethod\n def subclass_from_id(cls, class_id: str) -> Type['Artifact']:\n \"\"\" Find a subclass from its class ID. \"\"\"\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n if subclass.class_id() == class_id:\n break\n else:\n # todo this exception is not defined here\n raise InvalidArtifactClass(class_id)\n return subclass\n\n # --- Artifact public interface\n\n def __init__(self, data, metadata=None):\n \"\"\" Create an Artifact.\n\n Parameters\n ----------\n data: any\n The data of the artifact.\n metadata: dict\n User-defined metadata, saved with the artifact (optional).\n The metadata keys and values will be stored as strings.\n \"\"\"\n self.data = data\n if metadata is None:\n metadata = {}\n self.metadata = metadata\n\n @classmethod\n def read(cls, path, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a file, given the path.\n\n Parameters\n ----------\n path: str\n Filename from which the artifact is read.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Additional parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n with open(path, 'rb') as f:\n artifact = cls.read_bytes(f, metadata, **kwargs)\n return artifact\n\n @classmethod\n def read_bytes(cls, file_, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. 
If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n artifact = cls._read_bytes(file_, **kwargs)\n if metadata is not None:\n artifact.metadata = metadata\n return artifact\n\n # todo why the kwargs\n def write(self, path, **kwargs):\n \"\"\" Writes the artifact to file.\n\n Parameters\n ----------\n path: str\n Path to which the artifact is written.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n with open(path, 'wb') as f:\n self.write_bytes(f, **kwargs)\n\n # --- Abstract interface\n\n @staticmethod\n @abstractmethod\n def filename(basename):\n \"\"\" Complete a base filename with an extension.\n\n Parameters\n ----------\n basename: str\n The filename without extension.\n\n Returns\n -------\n filename: str\n The completed filename.\n\n \"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def _read_bytes(cls, file_, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n This is a private method that loads the artifact from a binary file without dealing with\n the logic of the external metadata. It is called by `Artifact.read_bytes`.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n pass\n\n @abstractmethod\n def write_bytes(self, file_, **kwargs):\n \"\"\" Writes the artifact to binary file.\n\n This method also need to take care of writing the artifact metadata in the file itself,\n whenever possible.\n If the artifact is being written as a `pond` `VersionedArtifact`, then the metadata is also\n stored in an external manifest.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object to which the artifact is written, opened in binary mode.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n pass\n\n def get_artifact_metadata(self):\n \"\"\"\n This is not the user metadata!\n\n Returns\n -------\n\n \"\"\"\n return None" }, { "identifier": "ArtifactRegistry", "path": "pond/artifact/artifact_registry.py", "snippet": "class ArtifactRegistry:\n \"\"\" Registry of data types to compatible artifact classes. 
\"\"\"\n\n def __init__(self):\n self._register = defaultdict(list)\n\n def register(self, artifact_class, data_class, format=None):\n item = ArtifactRegistryItem(artifact_class=artifact_class, format=format)\n self._register[data_class].append(item)\n\n def get_available_artifacts(self, data_class):\n \"\"\" Get all available artifacts for a given data class.\n\n Parameters\n ----------\n data_class: class\n Data class for which we need to find an adapter.\n\n Returns\n -------\n items: list of ArtifactRegistryItem\n All registered (artifact, format) items compatible with data_class.\n \"\"\"\n return self._register[data_class]\n\n def get_artifact(self, data_class, format=None):\n \"\"\"\n In case multiple artifacts are available for the same data class and format,\n the last registered artifact is returned.\n\n Parameters\n ----------\n data_class: class\n Data class for which we need to find an adapter.\n format: str\n We require an adapter that can handle this file format.\n\n Returns\n -------\n artifact_class: class\n Artifact class\n\n \"\"\"\n items = self.get_available_artifacts(data_class)\n if len(items) == 0:\n raise ArtifactNotFound(data_class)\n\n if format is None:\n artifact_class = items[-1].artifact_class\n else:\n for item in items:\n if item.format == format:\n artifact_class = item.artifact_class\n break\n else:\n raise FormatNotFound(data_class, format)\n\n return artifact_class" }, { "identifier": "FormatNotFound", "path": "pond/exceptions.py", "snippet": "class FormatNotFound(Exception):\n def __init__(self, data_class, format):\n super().__init__(\n f\"Artifact with format '{format}' compatible with data type '{data_class.__name__}' not found.\"\n )" }, { "identifier": "ArtifactNotFound", "path": "pond/exceptions.py", "snippet": "class ArtifactNotFound(Exception):\n def __init__(self, data_class):\n super().__init__(\n f\"No artifact compatible with data type '{data_class.__name__}'.\"\n )" } ]
import pytest

from pond.artifact import Artifact
from pond.artifact.artifact_registry import ArtifactRegistry
from pond.exceptions import FormatNotFound, ArtifactNotFound
2,095
class MockArtifactCSV(Artifact):
    pass


class MockArtifactExcel(Artifact):
    pass


@pytest.fixture()
def registry():
    registry = ArtifactRegistry()
    registry.register(MockArtifactCSV, list, format='csv')
    registry.register(MockArtifactExcel, list, format='xlsx')
    return registry


def test_lookup_with_format(registry):
    # look-up with format
    cls = registry.get_artifact(list, format='csv')
    assert cls == MockArtifactCSV


def test_lookup_no_format(registry):
    # look-up without format, return last inserted
    cls = registry.get_artifact(list)
    assert cls == MockArtifactExcel


def test_lookup_format_not_found(registry):
    # look-up with a format that is not registered
    with pytest.raises(FormatNotFound) as excinfo:
        registry.get_artifact(list, format='foo')
    msg = str(excinfo.value)
    assert 'foo' in msg
    assert 'list' in msg


def test_lookup_data_type_not_found(registry):
    # look-up with a data type that is not registered
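# A short usage sketch of the registry outside the pytest fixtures:
# registering two artifact classes for the same data type demonstrates the
# "last registered wins" rule when no format is given. MockArtifactJSON is
# a hypothetical class introduced only for this illustration.
class MockArtifactJSON(Artifact):
    pass


def demo_registry():
    registry = ArtifactRegistry()
    registry.register(MockArtifactCSV, dict, format='csv')
    registry.register(MockArtifactJSON, dict, format='json')
    assert registry.get_artifact(dict) is MockArtifactJSON  # last registered
    assert registry.get_artifact(dict, format='csv') is MockArtifactCSV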
with pytest.raises(ArtifactNotFound) as excinfo:
3
2023-12-24 13:05:58+00:00
4k
demirogun/pyethnobiology
pyethnobiology/indices.py
[ { "identifier": "RadialPlot", "path": "pyethnobiology/visualization.py", "snippet": "class RadialPlot:\n \"\"\"\n Creates a radial bar plot to visualize data in a circular layout.\n \"\"\"\n\n def __init__(self,\n data: pd.DataFrame,\n colorbar_title: str,\n indice: str = None,\n num_rows: int = 10,\n ytick_position: str = \"onbar\",\n colors: list = None,\n show_colorbar: bool = True,\n informant_column: str = \"informant\",\n taxon_column: str = \"taxon\",\n use_column: str = \"ailments_treated\"):\n\n self.data = data\n self.colorbar_title = colorbar_title\n self.indice = indice\n self.num_rows = num_rows\n self.yticks = None\n self.num_ticks = 5\n self.ytick_position = ytick_position\n self.colors = colors\n self.show_colorbar = show_colorbar\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n\n def plot(self):\n \"\"\"Creates and displays the radial bar plot.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n return self.fig, self.ax\n\n def save_plot(self, filename: str, dpi: int = 300):\n \"\"\"Saves the radial bar plot to a file.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n self.fig.savefig(filename, bbox_inches=\"tight\", dpi=dpi)\n\n def _prepare_data(self):\n \"\"\"Prepares data for plotting.\"\"\"\n\n self.indice_df = self.data.head(self.num_rows) if isinstance(self.num_rows, int) else self.data\n self.angles = np.linspace(0.05, 2 * np.pi - 0.05, len(self.indice_df), endpoint=False)\n self.indice_values = self.indice_df[self.indice].values\n self.taxon_values = self.indice_df[self.use_column].values if self.indice == \"FIC\" else self.indice_df[\n self.taxon_column].values\n\n def _create_plot(self):\n \"\"\"Creates the base plot.\"\"\"\n\n self.fig, self.ax = plt.subplots(figsize=(9, 12.6), subplot_kw={\"projection\": \"polar\"})\n self.ax.set_theta_offset(1.2 * np.pi / 2)\n self.ax.set_ylim(0 - (self.indice_values.min() * 0.4), self.indice_values.max())\n\n self._set_colormap()\n\n if len(self.indice_values) < 6:\n width = 1.4\n else:\n width = 0.52\n\n self.bars = self.ax.bar(self.angles, self.indice_values, color=self.colors, alpha=0.9, width=width, zorder=10)\n\n def _customize_plot(self):\n \"\"\"Customizes plot appearance.\"\"\"\n\n plt.rcParams[\"text.color\"] = \"#1f1f1f\"\n plt.rcParams.update({\"font.family\": \"serif\"})\n plt.rc(\"axes\", unicode_minus=False)\n\n # Wrap taxon labels for better readability\n self.taxon_values = [\"\\n\".join(wrap(r, 5, break_long_words=False)) for r in self.taxon_values]\n\n # Customize axes and ticks\n self.ax.xaxis.grid(False)\n self.ax.spines[\"start\"].set_color(\"none\")\n self.ax.spines[\"polar\"].set_color(\"none\")\n self.ax.set_xticks(self.angles)\n self.ax.set_xticklabels(self.taxon_values, size=12)\n\n # Set y-ticks and labels based on position preference\n self._set_yticks_and_labels()\n\n # Add colorbar if enabled\n if self.show_colorbar:\n self._add_colorbar()\n\n def _set_colormap(self):\n \"\"\"Sets the colormap for the bars.\"\"\"\n\n if self.colors is None:\n # Use default colors\n self.colors = [\"#ffcc70\", \"#c63d2f\"]\n else:\n # Use provided colors\n self.colors = self.colors\n\n # Create colormap and normalize values\n self.cmap = mplcolors.LinearSegmentedColormap.from_list(\"my_colormap\", self.colors, N=256)\n self.norm = mplcolors.Normalize(vmin=self.indice_values.min(), vmax=self.indice_values.max())\n self.colors = self.cmap(self.norm(self.indice_values))\n\n def 
_set_yticks_and_labels(self):\n \"\"\"Sets y-ticks and labels based on the specified position.\"\"\"\n\n self.ax.set_yticklabels([])\n self.yticks = np.linspace(0, self.indice_values.max() + (self.indice_values.max() * .20), self.num_ticks)\n self.ax.set_yticks(list(self.yticks))\n\n if self.ytick_position == \"on_line\":\n # Place y-tick labels on a separate line\n pad = self.indice_values.min() * 0.1\n for yt in self.yticks:\n self.ax.text(-0.2 * np.pi / 2, yt + pad, round(yt, 3), ha=\"center\", size=11, zorder=15)\n else:\n # Place y-tick labels on the bars\n for bar, length in zip(self.bars, self.indice_values):\n height = bar.get_height()\n self.ax.text(bar.get_x() + bar.get_width() / 2, height, f'{length:.3f}', ha='center', va='bottom',\n fontsize=10, zorder=15)\n\n def _add_colorbar(self):\n \"\"\"Adds a colorbar to the plot.\"\"\"\n\n cax = inset_axes(\n self.ax,\n width=\"100%\",\n height=\"100%\",\n loc=\"center\",\n bbox_to_anchor=(0.325, 0.1, 0.35, 0.01),\n bbox_transform=self.fig.transFigure\n )\n\n # Access the already-defined yticks\n yticks = np.linspace(self.indice_values.min(), self.indice_values.max(), self.num_ticks)\n\n cbar = self.fig.colorbar(\n ScalarMappable(norm=self.norm, cmap=self.cmap),\n cax=cax,\n orientation=\"horizontal\",\n ticks=yticks\n )\n\n cbar.outline.set_visible(False)\n cbar.ax.xaxis.set_tick_params(size=0)\n cbar.set_label(self.colorbar_title, size=12, labelpad=-40)" }, { "identifier": "HeatmapPlot", "path": "pyethnobiology/visualization.py", "snippet": "class HeatmapPlot:\n \"\"\"\n Creates a heatmap plot to visualize data in a grid format.\n \"\"\"\n\n def __init__(self,\n data: pd.DataFrame,\n title: str,\n value_column: str,\n row_column: str,\n column_column: str,\n cmap: str = \"coolwarm\",\n show_colorbar: bool = True,\n colorbar_shrink: float = 0.50,\n plot_width: float = 10,\n plot_height: float = 8,\n dpi: int = 300,\n fillna_zero: bool = True):\n\n self.data = data\n self.title = title\n self.value_column = value_column\n self.row_column = row_column\n self.column_column = column_column\n self.cmap = cmap\n self.show_colorbar = show_colorbar\n self.colorbar_shrink = colorbar_shrink\n self.plot_width = plot_width\n self.plot_height = plot_height\n self.dpi = dpi\n self.fillna_zero = fillna_zero\n\n def plot(self):\n \"\"\"Creates and displays the heatmap plot.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n return self.fig, self.ax\n\n def save_plot(self, filename: str, dpi: int = 300):\n \"\"\"Saves the heatmap plot to a file.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n self.fig.savefig(filename, bbox_inches=\"tight\", dpi=dpi)\n\n def _prepare_data(self):\n \"\"\"Pivots data into a suitable format for heatmap.\"\"\"\n\n self.heatmap_data = self.data.pivot(index=self.row_column, columns=self.column_column, values=self.value_column)\n if self.fillna_zero:\n self.heatmap_data = self.heatmap_data.fillna(0)\n\n def _create_plot(self):\n \"\"\"Creates the base heatmap plot.\"\"\"\n\n self.fig, self.ax = plt.subplots(figsize=(self.plot_width, self.plot_height), dpi=self.dpi)\n self.im = self.ax.imshow(self.heatmap_data, cmap=self.cmap)\n\n def _customize_plot(self):\n \"\"\"Customizes plot appearance.\"\"\"\n\n plt.rcParams[\"text.color\"] = \"#1f1f1f\"\n plt.rcParams.update({\"font.family\": \"serif\"})\n plt.rc(\"axes\", unicode_minus=False)\n\n # Set tick labels\n if len(self.heatmap_data.columns) > 10:\n rotation, ha = (90, \"center\")\n else:\n rotation, ha = (45, 
\"right\")\n\n plt.xticks(ticks=range(len(self.heatmap_data.columns)), labels=self.heatmap_data.columns, rotation=rotation,\n ha=ha)\n plt.yticks(ticks=range(len(self.heatmap_data.index)), labels=self.heatmap_data.index)\n\n # Add colorbar if enabled\n if self.show_colorbar:\n self._add_colorbar()\n\n # Customize labels and title\n plt.xlabel(self.column_column)\n plt.ylabel(self.row_column)\n plt.title(self.title) # Add a title if needed\n\n def _add_colorbar(self):\n \"\"\"Adds a colorbar to the plot.\"\"\"\n\n plt.colorbar(self.im, label=self.title, shrink=self.colorbar_shrink)" } ]
import pandas as pd

from pyethnobiology.visualization import RadialPlot
from pyethnobiology.visualization import HeatmapPlot
2,788
class FC:
    def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"):
        """
        Initializes the class with necessary data and column names.

        Args:
            data (pd.DataFrame): DataFrame containing plant usage information.
            informant_column (str, optional): Name of the column containing informant IDs. Defaults to "informant".
            taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon".
            use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated".
        """
        self.data = data
        self.informant_column = informant_column
        self.taxon_column = taxon_column
        self.use_column = use_column

    def calculate(self):
        """
        Calculates the frequency of citation (FC) for each species.

        Returns:
            pd.DataFrame: DataFrame containing taxon and FC columns.
        """
        # Calculate FC per species by counting unique informants for each taxon
        fc_df = (
            self.data.groupby(self.taxon_column, observed=True)[self.informant_column]
            .nunique()
            .reset_index(name="FC")
        )

        # Sort FC values in descending order
        fc_df = fc_df.sort_values(by="FC", ascending=False).reset_index(drop=True)

        return fc_df

    def save_data(self):
        FC_df = self.calculate()
        FC_df.to_csv("frequency_of_citation_FC.csv", index=False)
        print("Saved to frequency_of_citation_FC.csv")

    def plot_radial(self, filename="FC.png", dpi=300, num_row=10, ytick_position="onbar", colors=None,
                    show_colorbar=True):
        # Plot radial bar chart
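# A minimal worked example of the FC index (hypothetical survey data):
# informant A cites Mentha for two different ailments, but FC counts unique
# informants per taxon, so Mentha scores 2 rather than 3.
import pandas as pd

survey = pd.DataFrame({
    "informant": ["A", "A", "B", "B", "C"],
    "taxon": ["Mentha", "Mentha", "Mentha", "Salvia", "Salvia"],
    "ailments_treated": ["cold", "nausea", "cold", "cough", "cough"],
})
print(FC(survey).calculate())  # Mentha -> FC 2, Salvia -> FC 2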
radial_plot = RadialPlot(self.calculate(), "Frequency of Citation (FC)", "FC", num_row, ytick_position, colors,
0
2023-12-25 01:06:51+00:00
4k
Zitronenjoghurt/Colonaut
src/ui/dialogue.py
[ { "identifier": "DisplayText", "path": "src/ui/display_text.py", "snippet": "class DisplayText:\n CHARACTER_SPACES = {\n \"energy\": 0,\n \"lifesupport\": 0,\n \"nexus\": 0,\n \"sensor\": 0,\n \"you\": 0\n }\n\n def __init__(\n self, \n text: str|list[str], \n actions: Optional[dict] = None, \n action_answers: Optional[dict] = None, \n character: Optional[str] = None, \n tag: Optional[str] = None, \n char_delay: Optional[int] = None, \n line_delay: Optional[int] = None, \n line_symbol: Optional[bool] = None, \n newline: Optional[bool] = None,\n id: Optional[str] = None,\n jump_to: Optional[str] = None, \n event: Optional[str] = None,\n event_data: Any = None\n ) -> None:\n if actions is None:\n actions = {}\n if action_answers is None:\n action_answers = {}\n if tag is None:\n tag = CONFIG.DEFAULT_SHIP_CONSOLE_STYLE_TAG\n if char_delay is None:\n char_delay = CONFIG.DEFAULT_SHIP_CONSOLE_CHAR_DELAY\n if line_delay is None:\n line_delay = CONFIG.DEFAULT_SHIP_CONSOLE_LINE_DELAY\n if line_symbol is None:\n line_symbol = True\n if newline is None:\n newline = True\n\n if character and character not in self.CHARACTER_SPACES:\n raise ValueError(f\"Character {character} does not exist.\")\n elif character:\n character = character.lower() \n\n if isinstance(text, list):\n self.texts = text\n else:\n self.texts = [text]\n\n self.texts = [str(text) for text in self.texts]\n self.actions = actions\n self.action_answers = action_answers\n self.character = character\n self.tag = tag\n self.char_delay = char_delay\n self.line_delay = line_delay\n self.line_symbol = line_symbol\n self.newline = newline\n self.id = id\n self.jump_to = jump_to\n self.event = event\n self.event_data = event_data\n\n @staticmethod\n def from_dict(data) -> 'DisplayText':\n text = data.get(\"text\", None)\n if text is None:\n raise ValueError(\"An error occured while initializing DisplayText: no text provided\")\n actions = data.get(\"actions\", None)\n action_answers = data.get(\"action_answers\", None)\n character = data.get(\"character\", None)\n tag = data.get(\"tag\", None)\n char_delay = data.get(\"char_delay\", None)\n line_delay = data.get(\"line_delay\", None)\n newline = data.get(\"newline\", None)\n line_symbol = data.get(\"line_symbol\", None)\n id = data.get(\"id\", None)\n jump_to = data.get(\"jump_to\", None)\n event = data.get(\"event\", None)\n event_data = data.get(\"event_data\", None)\n return DisplayText(\n text=text, \n actions=actions, \n action_answers=action_answers, \n character=character, \n tag=tag, \n char_delay=char_delay, \n line_delay=line_delay, \n newline=newline, line_symbol=line_symbol,\n id=id,\n jump_to=jump_to,\n event=event,\n event_data=event_data\n )\n\n def add_text(self, text: str|list[str]) -> None:\n if isinstance(text, list):\n self.texts.extend(text)\n else:\n self.texts.append(text)\n \n def get_texts(self) -> list[dict]:\n result = []\n for i, text in enumerate(self.texts):\n if self.line_symbol and self.character:\n result.append({\"text\": self.character.upper(), \"tag\": self.character, \"char_delay\": 0, \"line_delay\": 0, \"newline\": False})\n result.append({\"text\": \" \"*self.CHARACTER_SPACES[self.character]+\"> \", \"tag\": \"computer\", \"char_delay\": 0, \"line_delay\": 0, \"newline\": False})\n elif self.line_symbol:\n result.append({\"text\": \"> \", \"tag\": \"computer\", \"char_delay\": 0, \"line_delay\": 0, \"newline\": False})\n if i+1 == len(self.texts) and self.has_actions():\n result.append({\"text\": text, \"tag\": self.tag, \"char_delay\": 
self.char_delay, \"line_delay\": 0, \"newline\": self.newline})\n else:\n result.append({\"text\": text, \"tag\": self.tag, \"char_delay\": self.char_delay, \"line_delay\": self.line_delay, \"newline\": self.newline})\n return result\n \n def get_actions(self) -> dict:\n return self.actions\n \n def get_action_answer(self, action_name: str) -> 'DisplayText':\n if action_name not in self.action_answers:\n answer = action_name\n else:\n answer = self.action_answers[action_name]\n return DisplayText(text=answer, character=\"you\", char_delay=0, line_delay=self.line_delay)\n \n def has_actions(self) -> bool:\n return len(self.actions) > 0\n \n def get_id(self) -> str:\n if self.id:\n return self.id\n else:\n return \"\"\n \n def is_jumping(self) -> bool:\n return isinstance(self.jump_to, str)\n \n def get_jump_to(self) -> str:\n if self.jump_to:\n return self.jump_to\n else:\n return \"\"\n \n def get_event(self) -> Optional[str]:\n return self.event\n \n def get_event_data(self) -> Any:\n return self.event_data" }, { "identifier": "EventTypeNotSubscribedError", "path": "src/constants/custom_exceptions.py", "snippet": "class EventTypeNotSubscribedError(Exception):\n def __init__(self, event_type: str) -> None:\n message = f\"Event type {event_type} has no listener yet.\"\n super().__init__(message)" }, { "identifier": "Event", "path": "src/events/event.py", "snippet": "class Event():\n TYPES = EventTypes\n \n def __init__(self, type: str, **kwargs) -> None:\n self.type = type\n self.data = kwargs" }, { "identifier": "EventBus", "path": "src/events/event_bus.py", "snippet": "class EventBus():\n _instance = None\n\n def __init__(self) -> None:\n if EventBus._instance is not None:\n raise RuntimeError(\"Tried to initialize multiple instances of EventBus.\")\n \n self.listeners: dict[str, Callable] = {}\n\n @staticmethod\n def get_instance() -> 'EventBus':\n if EventBus._instance is None:\n EventBus._instance = EventBus()\n return EventBus._instance\n \n @staticmethod\n def reset_instance() -> None:\n EventBus._instance = None\n \n \"\"\"\n Possible errors:\n - RuntimeError\n \"\"\"\n def subscribe(self, event_type: str, listener: Callable) -> None:\n if self.listeners.get(event_type, None) is not None:\n raise RuntimeError(f\"Subscription on event type {event_type} already exists.\")\n self.listeners[event_type] = listener\n\n \"\"\"\n Possible errors:\n - RuntimeError\n \"\"\"\n def unsubscribe(self, event_type: str, listener: Callable) -> None:\n if self.listeners.get(event_type, None) is None:\n raise RuntimeError(f\"Subscription on event type {event_type} does not exist.\")\n self.listeners.pop(event_type)\n\n \"\"\"\n Possible errors:\n - EventTypeNotSubscribedError\n - RuntimeError\n \"\"\"\n def publish(self, event: Event) -> Response:\n if event.type not in self.listeners:\n raise EventTypeNotSubscribedError(event_type=event.type)\n \n listener = self.listeners[event.type]\n try:\n response = listener(**event.data)\n except Exception as e:\n raise RuntimeError(f\"An error occured while publishing event {event.type}.\") from e\n \n return response" }, { "identifier": "construct_path", "path": "src/utils/file_operations.py", "snippet": "def construct_path(relative_path: str) -> str:\n path_parts = relative_path.split(\"/\")\n absolute_path = os.path.join(ROOT_DIR, *path_parts)\n return absolute_path" }, { "identifier": "file_to_dict", "path": "src/utils/file_operations.py", "snippet": "def file_to_dict(file_path: str) -> dict:\n with open(file_path, 'r', encoding='utf-8') as f:\n data = 
json.load(f)\n if not isinstance(data, dict):\n raise RuntimeError(\"Deserialized data is not a dictionary.\")\n return data" }, { "identifier": "files_in_directory", "path": "src/utils/file_operations.py", "snippet": "def files_in_directory(path: str, suffix: Optional[str] = None) -> list[str]:\n if not os.path.exists(path):\n raise ValueError(f\"Directory {path} does not exist.\")\n \n files = []\n for file in os.listdir(path):\n if suffix is not None:\n if suffix in file:\n files.append(file)\n else:\n files.append(file)\n return files" } ]
from copy import deepcopy

from src.ui.display_text import DisplayText
from src.constants.custom_exceptions import EventTypeNotSubscribedError
from src.events.event import Event
from src.events.event_bus import EventBus
from src.utils.file_operations import construct_path, file_to_dict, files_in_directory
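# A small usage sketch of the EventBus quoted in the context above (the
# event type string and listener are invented for illustration): exactly one
# listener may subscribe per event type, and publish() forwards the event's
# data dict as keyword arguments to that listener.
bus = EventBus.get_instance()
bus.subscribe("reactor_started", lambda level: f"power at {level}%")
print(bus.publish(Event(type="reactor_started", level=80)))  # -> power at 80%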
2,649
EVENT_BUS = EventBus.get_instance()
DIALOGUE_CATEGORIES = ["system"]
DIALOGUE_FILE_PATH = construct_path("src/data/dialogue/{dialogue_category}/")


class Dialogue():
    def __init__(self, name: str, display_texts: list[DisplayText]) -> None:
        self.name = name
        self.display_texts = display_texts
        self.current_index = 0
        self.action_pending = False
        self.actions = {}
        self.event_pending = False
        self.event = None
        self.id_index_map = {}

        # Register indices for entry ids
        for i, display_text in enumerate(self.display_texts):
            id = display_text.get_id()
            if id in self.id_index_map:
                raise RuntimeError(f"An error occurred while loading dialogue '{name}': the id '{id}' of entry at index {i} already exists at index {self.id_index_map[id]}.")
            if id != "":
                self.id_index_map[id] = i

        # Verify action target ids
        for i, display_text in enumerate(self.display_texts):
            actions = display_text.get_actions()
            if len(actions) == 0:
                continue
            for action_target in actions.values():
                if action_target not in self.id_index_map:
                    raise RuntimeError(f"An error occurred while loading dialogue '{name}': the action of the entry at index {i} references an invalid id '{action_target}'.")

    def get_texts(self) -> list[DisplayText]:
        display_texts = []
        i: int = self.current_index
        while i < len(self.display_texts):
            self.current_index = i
            display_text = self.display_texts[i]
            display_texts.append(display_text)

            event_type = display_text.get_event()
            if isinstance(event_type, str):
                event_data = display_text.get_event_data()
                try:
                    if isinstance(event_data, dict):
                        self.event = Event(type=event_type, **event_data)
                        self.event_pending = True
                    else:
                        self.event = Event(type=event_type)
                        self.event_pending = True
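# A hedged sketch of constructing a Dialogue by hand (entries normally come
# from the JSON files under DIALOGUE_FILE_PATH); the ids and texts here are
# invented. The action target "confirm" must match an entry id, otherwise
# __init__ raises the RuntimeError seen above.
intro = DisplayText(
    text="Reactor online. Proceed?",
    character="nexus",
    id="intro",
    actions={"yes": "confirm", "no": "intro"},
)
confirm = DisplayText(text="Acknowledged.", character="nexus", id="confirm")
dialogue = Dialogue(name="reactor_check", display_texts=[intro, confirm])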
except EventTypeNotSubscribedError:
1
2023-12-22 21:24:33+00:00
4k
akkoaya/ArticleSpider
ArticleSpider/spiders/zhihu.py
[ { "identifier": "ZhihuQuestionItem", "path": "ArticleSpider/items.py", "snippet": "class ZhihuQuestionItem(scrapy.Item):\n # 知乎的问题 item\n question_id = scrapy.Field()\n topics = scrapy.Field()\n url = scrapy.Field()\n title = scrapy.Field()\n # content = scrapy.Field()\n answer_num = scrapy.Field()\n comments_num = scrapy.Field()\n subscriber_num = scrapy.Field()\n view_num = scrapy.Field()\n crawl_time = scrapy.Field()\n\n def get_insert_sql(self):\n insert_sql = \"\"\"\n insert into zhihu_question(question_id,topics,url,title,answer_num,\n comments_num,subscriber_num,view_num,crawl_time,)\n values(%s,%s,%s,%s,%s,%s,%s,%s,%s) \n \n \"\"\"\n # ON DUPLICATE KEY UPDATE answer_num = VALUES(answer_num)\n # 上面这一行是mysql的更新语句,防止主键重复而报错,如果已经存在就更新内容,title=VALUES(title)的话,更新的内容就是title,可以添加其他更新字段\n # 因为爬取的时候是有可能会爬到同一个question的,所以主键可能会重复\n # 必须添加在values()之后\n\n question_id = self[\"question_id\"][0]\n topics = \",\".join(self[\"topics\"])\n url = self[\"url\"][0]\n title = \"\".join(self[\"title\"])\n # content = \"\".join(self[\"content\"])\n #utils目录下的common.py写一个专门提取数字的方法get_nums,然后给这里调用\n answer_num = get_nums(self['answer_num'][0])\n comments_num = get_nums(self['comments_num'][0])\n subscriber_num = get_nums(self['subscriber_num'][0])\n view_num = get_nums(self['view_num'][1])\n #strftime()可以把datetime格式转为str类型,后面跟自定义的格式,可以在settings.py中配置好,名称为SQL_DATETIME_FORMAT以及SQL_DATE_FORMAT\n crawl_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n params = (\n question_id, topics, url, title, answer_num, comments_num, subscriber_num, view_num, crawl_time)\n #注意这里一定要和上面的sql语句里的顺序一致\n return insert_sql, params" }, { "identifier": "ZhihuAnswerItem", "path": "ArticleSpider/items.py", "snippet": "class ZhihuAnswerItem(scrapy.Item):\n # 知乎的回答 item\n answer_id = scrapy.Field()\n url = scrapy.Field()\n question_id = scrapy.Field()\n author_name = scrapy.Field()\n content = scrapy.Field()\n praise_num = scrapy.Field()\n comments_num = scrapy.Field()\n create_time = scrapy.Field()\n update_time = scrapy.Field()\n crawl_time = scrapy.Field()\n\n def get_insert_sql(self):\n # 插入知乎question表的sql语句\n insert_sql = \"\"\"\n insert into zhihu_answer(answer_id,url,question_id,author_name,content,praise_num,\n comments_num,create_time,update_time,crawl_time,)\n values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \n \n \"\"\"\n # ON DUPLICATE KEY UPDATE content = VALUES(content) praise_num = VALUES(praise_num)\n # 上面这一行是mysql的更新语句,防止主键重复而报错,如果已经存在就更新内容,answer_id=VALUES(answer_id)的话,更新的内容就是answer_id,可以添加其他更新字段\n # 因为爬取的时候是有可能会爬到同一个answer的,所以主键可能会重复\n # 必须添加在values()之后\n\n #因为create_time和update_time在原来的json里是一个int的字段,datetime.datetime.fromtimestamp()方法可以解析为datetime格式\n create_time = datetime.datetime.fromtimestamp(self['create_time']).strftime(\"%Y-%m-%d %H:%M:%S\")\n update_time = datetime.datetime.fromtimestamp(self['update_time']).strftime(\"%Y-%m-%d %H:%M:%S\")\n crawl_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n params = (\n self[\"answer_id\"], self[\"url\"], self['question_id'],\n self['author_name'], self['content'], self['praise_num'],\n self['comments_num'], create_time, update_time, crawl_time\n )\n return insert_sql, params" } ]
import datetime
import scrapy
import time
import pickle
import os
import re
import json

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import Keys
from urllib import parse
from scrapy.loader import ItemLoader

from ..items import ZhihuQuestionItem
from ..items import ZhihuAnswerItem
2,263
class ZhihuSpider(scrapy.Spider):
    name = "zhihu"
    allowed_domains = ["www.zhihu.com"]
    start_urls = ["https://www.zhihu.com"]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'referer': 'https://www.zhihu.com/',
    }

    # Manually strip the `cursor` part after `feeds?` (keep everything up to `include`),
    # and also strip the trailing `session_id` part.
    # Then put placeholders after `question/`, `limit=` and `offset=` in turn,
    # because the `offset` value is effectively the number of answers already requested.
    start_answer_url = 'https://www.zhihu.com/api/v4/questions/{0}/feeds?include=&limit={1}&offset={2}&order=default&platform=desktop'

    def start_requests(self):
        if os.path.isfile(r"..\cookies\zhihucookie.txt") is False:
            Chrome_options = webdriver.ChromeOptions()
            Chrome_options.add_experimental_option('detach', True)
            Chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
            browser = webdriver.Chrome(options=Chrome_options)
            browser.maximize_window()
            browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
                "source": """
                Object.defineProperty(navigator, 'webdriver', {
                    get: () => undefined
                })
                """
            })
            browser.get("https://www.zhihu.com")
            time.sleep(1)
            browser.find_element(By.CSS_SELECTOR, '.SignFlow-tabs div[class="SignFlow-tab"]').click()
            browser.find_element(By.CSS_SELECTOR, '.SignFlow-account input[name="username"]').send_keys("username")
            browser.find_element(By.CSS_SELECTOR, '.SignFlow-password input[name="password"]').send_keys("password")
            time.sleep(1)
            browser.find_element(By.CSS_SELECTOR, '.SignFlow-password input[name="password"]').send_keys(Keys.ENTER)
            time.sleep(10)
            browser.get("https://www.zhihu.com")
            cookies = browser.get_cookies()
            time.sleep(1)
            cookie_dict = {}
            # To use cookie_dict, COOKIES_ENABLED = True must be set in settings.py.
            # That way only the first Request needs the cookies argument; later
            # Requests read the cookies automatically.
            # Write the cookies to a file
            f = open(r'..\cookies\zhihucookie.txt', 'wb')
            pickle.dump(cookies, f)  # pickle.dump the cookies into the folder
            f.close()
            for cookie in cookies:
                cookie_dict[cookie['name']] = cookie['value']
            browser.close()
            return [scrapy.Request(url=self.start_urls[0], headers=self.headers, dont_filter=True, cookies=cookie_dict)]
        else:
            f = open(r'..\cookies\zhihucookie.txt', 'rb')  # note: the original path contained a stray 'r' typo
            cookies = pickle.load(f)
            f.close()
            cookie_dict = {}
            for cookie in cookies:
                cookie_dict[cookie['name']] = cookie['value']
            return [scrapy.Request(url=self.start_urls[0], headers=self.headers, dont_filter=True, cookies=cookie_dict)]

    def parse(self, response):
        # Collect all URLs on the page and make them absolute
        all_urls = response.css('a::attr(href)').extract()
        all_urls = [parse.urljoin(response.url, url) for url in all_urls]
        # Further filter out anything that is not a proper URL
        all_urls = filter(lambda x: True if x.startswith('https') else False, all_urls)
        # Extract the zhihu question URLs
        for url in all_urls:
            match_obj = re.match(r'(.*zhihu.com/question/(\d+))(/|$).*', url)  # `$` marks the end of the string
            if match_obj:
                request_url = match_obj.group(1)
                question_id = match_obj.group(2)
                # If re.match succeeds, download the page and hand it to parse_question
                yield scrapy.Request(url=request_url, meta={'question_id': question_id}, headers=self.headers, callback=self.parse_question)
                # break  # breaking out here is handy for debugging: it stops the endless stream of requests
            else:
                # Otherwise keep following the link
                yield scrapy.Request(url=url, headers=self.headers, callback=self.parse)  # can also be commented out while debugging
                # pass

    def parse_question(self, response):
        # handle the question page
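# How the templated answers-API URL above is meant to be filled in (a sketch;
# the question id and paging numbers are placeholders): {0} is the question
# id, {1} the page size, and {2} the offset of answers already fetched.
answers_url = ZhihuSpider.start_answer_url.format('311110951', 20, 0)
# -> https://www.zhihu.com/api/v4/questions/311110951/feeds?include=&limit=20&offset=0&order=default&platform=desktop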
item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response)
0
2023-12-29 15:05:22+00:00
4k
ApiaoSamaa/task2
train.py
[ { "identifier": "Encoder", "path": "models.py", "snippet": "class Encoder(nn.Module):\n \"\"\"\n Encoder.\n \"\"\"\n\n def __init__(self, encoded_image_size=14):\n super(Encoder, self).__init__()\n self.enc_image_size = encoded_image_size\n\n resnet = torchvision.models.resnet101(pretrained=True) # pretrained ImageNet ResNet-101\n\n # Remove linear and pool layers (since we're not doing classification)\n modules = list(resnet.children())[:-2]\n self.resnet = nn.Sequential(*modules)\n\n # Resize image to fixed size to allow input images of variable size\n self.adaptive_pool = nn.AdaptiveAvgPool2d((encoded_image_size, encoded_image_size))\n\n self.fine_tune()\n\n def forward(self, images):\n \"\"\"\n Forward propagation.\n\n :param images: images, a tensor of dimensions (batch_size, 3, image_size, image_size)\n :return: encoded images\n \"\"\"\n out = self.resnet(images) # (batch_size, 2048, image_size/32, image_size/32)\n out = self.adaptive_pool(out) # (batch_size, 2048, encoded_image_size, encoded_image_size)\n out = out.permute(0, 2, 3, 1) # (batch_size, encoded_image_size, encoded_image_size, 2048)\n return out\n\n def fine_tune(self, fine_tune=True):\n \"\"\"\n Allow or prevent the computation of gradients for convolutional blocks 2 through 4 of the encoder.\n\n :param fine_tune: Allow?\n \"\"\"\n for p in self.resnet.parameters():\n p.requires_grad = False\n # If fine-tuning, only fine-tune convolutional blocks 2 through 4\n for c in list(self.resnet.children())[5:]:\n for p in c.parameters():\n p.requires_grad = fine_tune" }, { "identifier": "DecoderWithAttention", "path": "models.py", "snippet": "class DecoderWithAttention(nn.Module):\n \"\"\"\n Decoder.\n \"\"\"\n\n def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, encoder_dim=2048, dropout=0.5):\n \"\"\"\n :param attention_dim: size of attention network\n :param embed_dim: embedding size\n :param decoder_dim: size of decoder's RNN\n :param vocab_size: size of vocabulary\n :param encoder_dim: feature size of encoded images\n :param dropout: dropout\n \"\"\"\n super(DecoderWithAttention, self).__init__()\n\n self.encoder_dim = encoder_dim\n self.attention_dim = attention_dim\n self.embed_dim = embed_dim\n self.decoder_dim = decoder_dim\n self.vocab_size = vocab_size\n self.dropout = dropout\n\n self.attention = Attention(encoder_dim, decoder_dim, attention_dim) # attention network\n\n self.embedding = nn.Embedding(vocab_size, embed_dim) # embedding layer\n self.dropout = nn.Dropout(p=self.dropout)\n self.decode_step = nn.LSTMCell(embed_dim + encoder_dim, decoder_dim, bias=True) # decoding LSTMCell\n self.init_h = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial hidden state of LSTMCell\n self.init_c = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial cell state of LSTMCell\n self.f_beta = nn.Linear(decoder_dim, encoder_dim) # linear layer to create a sigmoid-activated gate\n self.sigmoid = nn.Sigmoid()\n self.fc = nn.Linear(decoder_dim, vocab_size) # linear layer to find scores over vocabulary\n self.init_weights() # initialize some layers with the uniform distribution\n\n def init_weights(self):\n \"\"\"\n Initializes some parameters with values from the uniform distribution, for easier convergence.\n \"\"\"\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n def load_pretrained_embeddings(self, embeddings):\n \"\"\"\n Loads embedding layer with pre-trained embeddings.\n\n :param embeddings: pre-trained 
embeddings\n \"\"\"\n self.embedding.weight = nn.Parameter(embeddings)\n\n def fine_tune_embeddings(self, fine_tune=True):\n \"\"\"\n Allow fine-tuning of embedding layer? (Only makes sense to not-allow if using pre-trained embeddings).\n\n :param fine_tune: Allow?\n \"\"\"\n for p in self.embedding.parameters():\n p.requires_grad = fine_tune\n\n def init_hidden_state(self, encoder_out):\n \"\"\"\n Creates the initial hidden and cell states for the decoder's LSTM based on the encoded images.\n\n :param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)\n :return: hidden state, cell state\n \"\"\"\n mean_encoder_out = encoder_out.mean(dim=1)\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = self.init_c(mean_encoder_out)\n return h, c\n\n def forward(self, encoder_out, encoded_captions, caption_lengths):\n \"\"\"\n Forward propagation.\n\n :param encoder_out: encoded images, a tensor of dimension (batch_size, enc_image_size, enc_image_size, encoder_dim)\n :param encoded_captions: encoded captions, a tensor of dimension (batch_size, max_caption_length)\n :param caption_lengths: caption lengths, a tensor of dimension (batch_size, 1)\n :return: scores for vocabulary, sorted encoded captions, decode lengths, weights, sort indices\n \"\"\"\n\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n\n # Flatten image\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n\n # Sort input data by decreasing lengths; why? apparent below\n caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim=0, descending=True)\n encoder_out = encoder_out[sort_ind]\n encoded_captions = encoded_captions[sort_ind]\n\n # Embedding\n embeddings = self.embedding(encoded_captions) # (batch_size, max_caption_length, embed_dim)\n\n # Initialize LSTM state\n h, c = self.init_hidden_state(encoder_out) # (batch_size, decoder_dim)\n\n # We won't decode at the <end> position, since we've finished generating as soon as we generate <end>\n # So, decoding lengths are actual lengths - 1\n decode_lengths = (caption_lengths - 1).tolist()\n\n # Create tensors to hold word predicion scores and alphas\n predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size).to(device)\n alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(device)\n\n # At each time-step, decode by\n # attention-weighing the encoder's output based on the decoder's previous hidden state output\n # then generate a new word in the decoder with the previous word and the attention weighted encoding\n for t in range(max(decode_lengths)):\n batch_size_t = sum([l > t for l in decode_lengths])\n attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t],\n h[:batch_size_t])\n gate = self.sigmoid(self.f_beta(h[:batch_size_t])) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], attention_weighted_encoding], dim=1),\n (h[:batch_size_t], c[:batch_size_t])) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n\n return predictions, encoded_captions, decode_lengths, alphas, sort_ind" } ]
import time

import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence

from models import Encoder, DecoderWithAttention
from datasets import *
from utils import *
from nltk.translate.bleu_score import corpus_bleu
2,564
# Data parameters
data_folder = './captioned_data'  # folder with data files saved by create_input_files.py
data_name = 'flickr30k'  # base name shared by data files
min_word_freq = 5

# Model parameters
emb_dim = 512  # dimension of word embeddings
attention_dim = 512  # dimension of attention linear layers
decoder_dim = 512  # dimension of decoder RNN
dropout = 0.5
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # sets device for model and PyTorch tensors
cudnn.benchmark = True  # set to true only if inputs to model are fixed size; otherwise lot of computational overhead

# Training parameters
start_epoch = 0
epochs = 120  # number of epochs to train for (if early stopping is not triggered)
epochs_since_improvement = 0  # keeps track of number of epochs since there's been an improvement in validation BLEU
batch_size = 1
workers = 4  # for data-loading; right now, only 1 works with h5py
encoder_lr = 1e-4  # learning rate for encoder if fine-tuning
decoder_lr = 4e-4  # learning rate for decoder
grad_clip = 5.  # clip gradients at an absolute value of 5
alpha_c = 1.  # regularization parameter for 'doubly stochastic attention', as in the paper
best_bleu4 = 0.  # BLEU-4 score right now
print_freq = 100  # print training/validation stats every __ batches
fine_tune_encoder = False  # fine-tune encoder?
checkpoint = None  # path to checkpoint, None if none
cap_per_img = 5


def main():
    """
    Training and validation.
    """
    global best_bleu4, epochs_since_improvement, checkpoint, start_epoch, fine_tune_encoder, data_name, word_map

    # Read word map
    word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
    with open(word_map_file, 'r') as j:
        word_map = json.load(j)

    # Initialize / load checkpoint
    if checkpoint is None:
        decoder = DecoderWithAttention(attention_dim=attention_dim,
                                       embed_dim=emb_dim,
                                       decoder_dim=decoder_dim,
                                       vocab_size=len(word_map),
                                       dropout=dropout).to(device)
        decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
                                             lr=decoder_lr)
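# A hedged sketch of the packed-sequence loss this training script typically
# computes (the actual train() body is not shown in this excerpt): captions
# are shifted past <start>, padding is removed with pack_padded_sequence,
# and the doubly stochastic attention term weighted by alpha_c is added.
criterion = nn.CrossEntropyLoss().to(device)

def caption_loss(scores, caps_sorted, decode_lengths, alphas):
    targets = caps_sorted[:, 1:]  # drop the <start> token
    scores_p = pack_padded_sequence(scores, decode_lengths, batch_first=True).data
    targets_p = pack_padded_sequence(targets, decode_lengths, batch_first=True).data
    loss = criterion(scores_p, targets_p)
    loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()  # attention regularization
    return loss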
# Data parameters
data_folder = './captioned_data'  # folder with data files saved by create_input_files.py
data_name = 'flickr30k'  # base name shared by data files
min_word_freq = 5

# Model parameters
emb_dim = 512  # dimension of word embeddings
attention_dim = 512  # dimension of attention linear layers
decoder_dim = 512  # dimension of decoder RNN
dropout = 0.5
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # sets device for model and PyTorch tensors
cudnn.benchmark = True  # set to true only if inputs to model are fixed size; otherwise lot of computational overhead

# Training parameters
start_epoch = 0
epochs = 120  # number of epochs to train for (if early stopping is not triggered)
epochs_since_improvement = 0  # keeps track of number of epochs since there's been an improvement in validation BLEU
batch_size = 1
workers = 4  # for data-loading; right now, only 1 works with h5py
encoder_lr = 1e-4  # learning rate for encoder if fine-tuning
decoder_lr = 4e-4  # learning rate for decoder
grad_clip = 5.  # clip gradients at an absolute value of
alpha_c = 1.  # regularization parameter for 'doubly stochastic attention', as in the paper
best_bleu4 = 0.  # BLEU-4 score right now
print_freq = 100  # print training/validation stats every __ batches
fine_tune_encoder = False  # fine-tune encoder?
checkpoint = None  # path to checkpoint, None if none
cap_per_img = 5


def main():
    """
    Training and validation.
    """
    global best_bleu4, epochs_since_improvement, checkpoint, start_epoch, fine_tune_encoder, data_name, word_map

    # Read word map
    word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
    with open(word_map_file, 'r') as j:
        word_map = json.load(j)

    # Initialize / load checkpoint
    if checkpoint is None:
        decoder = DecoderWithAttention(attention_dim=attention_dim,
                                       embed_dim=emb_dim,
                                       decoder_dim=decoder_dim,
                                       vocab_size=len(word_map),
                                       dropout=dropout).to(device)
        decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
                                             lr=decoder_lr)
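The crop stops right after the decoder optimizer is built, before grad_clip and alpha_c (defined in the comments above) are ever used. A sketch of the training step they conventionally belong to, with the 'doubly stochastic attention' regularizer from the Show, Attend and Tell paper; this illustrates the pattern the comments name, not this file's actual code:

def train_step(scores, targets, alphas, decoder_optimizer, criterion,
               grad_clip=5., alpha_c=1.):
    # cross-entropy over packed, unpadded scores/targets
    loss = criterion(scores, targets)
    # doubly stochastic attention: push each pixel's attention weights
    # to sum to ~1 across decoding timesteps
    loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
    decoder_optimizer.zero_grad()
    loss.backward()
    # clip gradients at an absolute value of grad_clip
    for group in decoder_optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)
    decoder_optimizer.step()
    return loss.item()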
encoder = Encoder().to(device)
0
2023-12-27 11:48:51+00:00
4k
YYJeffrey/july_server
app/api/v2/comment.py
[ { "identifier": "db", "path": "app/model/base.py", "snippet": "class BaseModel(db.Model):\n def __getitem__(self, key):\n def init_on_load(self):\n def __set_fields(self):\n def _set_fields(self):\n def keys(self):\n def hide(self, *keys):\n def append(self, *keys):\n def status(self):\n def get_or_404(cls, **kwargs):\n def all_or_404(cls, **kwargs):\n def get_one(cls, **kwargs):\n def get_all(cls, **kwargs):\n def create(cls, commit: bool = True, **kwargs):\n def update(self, commit: bool = True, **kwargs):\n def save(self, commit: bool = True):\n def delete(self, commit: bool = True, soft: bool = True):\n def get_pagination(cls, not_del: bool = True, **kwargs):" }, { "identifier": "MessageCategory", "path": "app/lib/enums.py", "snippet": "class MessageCategory(Enum):\n \"\"\"\n 消息分类\n \"\"\"\n COMMENT = '评论'\n FOLLOWING = '关注'\n STAR = '收藏'" }, { "identifier": "NotFound", "path": "app/lib/exception.py", "snippet": "class NotFound(APIException):\n code = 404\n msg_code = 10011\n msg = '资源不存在'" }, { "identifier": "Success", "path": "app/lib/exception.py", "snippet": "class Success(APIException):\n code = 200\n msg_code = 0\n msg = '成功'" }, { "identifier": "Deleted", "path": "app/lib/exception.py", "snippet": "class Deleted(APIException):\n code = 200\n msg_code = 3\n msg = '删除成功'" }, { "identifier": "Created", "path": "app/lib/exception.py", "snippet": "class Created(APIException):\n code = 201\n msg_code = 1\n msg = '创建成功'" }, { "identifier": "RedPrint", "path": "app/lib/red_print.py", "snippet": "class RedPrint(object):\n \"\"\"\n 红图用于嵌套路由使用\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.mound = []\n\n def route(self, rule, **options):\n def decorator(func):\n if 'strict_slashes' not in options:\n options['strict_slashes'] = False\n self.mound.append((func, rule, options))\n return func\n\n return decorator\n\n def register(self, bp, url_prefix=None):\n if url_prefix is None:\n url_prefix = f\"/{self.name}\"\n\n for func, rule, options in self.mound:\n endpoint = f\"{self.name}/{options.pop('endpoint', func.__name__)}\"\n bp.add_url_rule(url_prefix + rule, endpoint, func, **options)" }, { "identifier": "paginator_schema", "path": "app/lib/schema.py", "snippet": "def paginator_schema(pagination: Pagination):\n \"\"\"\n 分页响应格式\n \"\"\"\n return {\n 'items': pagination.items,\n 'current_page': pagination.page,\n 'next_page': pagination.next_num,\n 'prev_page': pagination.prev_num,\n 'total_page': pagination.pages,\n 'total_count': pagination.total\n }" }, { "identifier": "auth", "path": "app/lib/token.py", "snippet": "def verify_token(token):\ndef generate_token(user_id):" }, { "identifier": "Comment", "path": "app/model/comment.py", "snippet": "class Comment(BaseModel):\n \"\"\"\n 评论模型\n \"\"\"\n __tablename__ = 'comment'\n\n content = Column(String(256), nullable=False, comment='内容')\n is_anon = Column(Boolean, default=False, comment='是否匿名')\n user_id = Column(String(32), nullable=False, index=True, comment='用户标识')\n topic_id = Column(String(32), nullable=False, index=True, comment='话题标识')\n comment_id = Column(String(32), index=True, comment='父评论标识')\n ip_belong = Column(String(128), comment='IP归属地')\n\n def __str__(self):\n return self.content\n\n def _set_fields(self):\n self.append('push_time')\n self._exclude.extend(['user_id'])\n\n @property\n def push_time(self):\n \"\"\"\n 发布时间\n \"\"\"\n if self.create_time is not None:\n return datetime_to_hint(self.create_time)\n return None\n\n @classmethod\n def get_commented(cls, user_id, topic_id):\n \"\"\"\n 
获取该用户是否评论该话题\n \"\"\"\n return cls.get_one(user_id=user_id, topic_id=topic_id) is not None\n\n @classmethod\n def get_comment_count(cls, topic_id):\n \"\"\"\n 获取该话题的评论数量\n \"\"\"\n return db.session.query(func.count(cls.id)).filter_by(topic_id=topic_id).scalar()" }, { "identifier": "Message", "path": "app/model/message.py", "snippet": "class Message(BaseModel):\n \"\"\"\n 消息模型\n \"\"\"\n __tablename__ = 'message'\n\n content = Column(String(256), nullable=False, comment='内容')\n category = Column(Enum(MessageCategory), default=MessageCategory.COMMENT, comment='类型')\n is_read = Column(Boolean, default=False, comment='是否已读')\n is_anon = Column(Boolean, default=False, comment='是否匿名')\n user_id = Column(String(32), nullable=False, index=True, comment='用户标识')\n action_user_id = Column(String(32), nullable=False, index=True, comment='发起用户标识')\n topic_id = Column(String(32), index=True, comment='话题标识')\n\n def __str__(self):\n return self.content\n\n def _set_fields(self):\n self.append('push_time')\n self._exclude.extend(['action_user_id'])\n\n @property\n def push_time(self):\n \"\"\"\n 发布时间\n \"\"\"\n if self.create_time is not None:\n return datetime_to_hint(self.create_time)\n return None" }, { "identifier": "Topic", "path": "app/model/topic.py", "snippet": "class Topic(BaseModel):\n \"\"\"\n 话题模型\n \"\"\"\n __tablename__ = 'topic'\n\n title = Column(String(64), comment='标题')\n content = Column(String(1024), nullable=False, comment='内容')\n is_anon = Column(Boolean, default=False, comment='是否匿名')\n click_count = Column(Integer, default=0, comment='点击次数')\n star_count = Column(Integer, default=0, comment='收藏次数')\n comment_count = Column(Integer, default=0, comment='评论次数')\n images = Column(JSON, comment='图片')\n user_id = Column(String(32), nullable=False, index=True, comment='用户标识')\n video_id = Column(String(32), index=True, comment='视频标识')\n ip_belong = Column(String(128), comment='IP归属地')\n\n def __str__(self):\n return self.content\n\n def _set_fields(self):\n self.append('push_time')\n self._exclude.extend(['user_id'])\n\n @property\n def push_time(self):\n \"\"\"\n 发布时间\n \"\"\"\n if self.create_time is not None:\n return datetime_to_hint(self.create_time)\n return None\n\n @property\n def starred(self):\n \"\"\"\n 是否收藏\n \"\"\"\n if g.user is None:\n return False\n return Star.get_starred(user_id=g.user.id, topic_id=self.id)\n\n @property\n def commented(self):\n \"\"\"\n 是否评论\n \"\"\"\n if g.user is None:\n return False\n return Comment.get_commented(user_id=g.user.id, topic_id=self.id)" }, { "identifier": "create_comment_verify", "path": "app/service/comment.py", "snippet": "def create_comment_verify(form):\n \"\"\"\n 创建评论验证\n \"\"\"\n # 话题校验\n topic_id = form.get_data('topic_id')\n topic = Topic.get_one(id=topic_id)\n if topic is None:\n raise NotFound(msg='话题不存在')\n\n # 父评论校验\n comment_id = form.get_data('comment_id')\n if comment_id is not None:\n comment = Comment.get_one(id=comment_id, topic_id=topic_id)\n if comment is None:\n raise NotFound(msg='父评论不存在')\n reply_user_id = comment.user_id\n else:\n reply_user_id = topic.user_id\n\n # 内容文本校验\n content = form.get_data('content')\n client = get_mp_client()\n if content is not None:\n if not client.check_content(content=content, openid=g.user.openid):\n raise TextContentIllegal('内容不合法')\n\n # 更新IP归属地\n ip_belong = update_ip_belong()\n\n # 保存评论和消息 更新话题评论数\n with db.auto_commit():\n comment = Comment.create(commit=False, user_id=g.user.id, ip_belong=ip_belong, **form.dt_data)\n if reply_user_id != g.user.id:\n Message.create(\n 
commit=False,\n content=MessageCategory.COMMENT.value + '了你',\n category=MessageCategory.COMMENT,\n is_anon=comment.is_anon,\n user_id=reply_user_id,\n action_user_id=g.user.id,\n topic_id=topic.id\n )\n\n # 更新话题评论数\n topic.update(comment_count=Comment.get_comment_count(topic_id=topic.id))\n\n # 推送评论消息\n if current_app.config['COMMENT_TEMPLATE_ID'] is not None:\n reply_user = User.get_one(id=reply_user_id)\n send_comment_msg(\n content=content,\n openid=reply_user.openid,\n nickname=g.user.nickname,\n topic_id=topic.id\n )" }, { "identifier": "get_comment_list", "path": "app/service/comment.py", "snippet": "def get_comment_list(topic_id=None, user_id=None):\n \"\"\"\n 获取评论列表\n \"\"\"\n validator = PaginateValidator().dt_data\n page = validator.get('page')\n size = validator.get('size')\n\n topic_user = aliased(User)\n\n query = db.session.query(Comment, Topic, User, topic_user) \\\n .outerjoin(Topic, Comment.topic_id == Topic.id) \\\n .outerjoin(User, Comment.user_id == User.id) \\\n .outerjoin(topic_user, Topic.user_id == topic_user.id) \\\n .filter(Comment.delete_time.is_(None)) \\\n .filter(Topic.delete_time.is_(None))\n\n if topic_id is not None:\n query = query.filter(Comment.topic_id == topic_id).order_by(Comment.create_time)\n\n if user_id is not None:\n query = query.filter(Comment.user_id == user_id).order_by(Comment.create_time.desc())\n\n data = query.paginate(page=page, size=size)\n\n items = data.items\n for index, (comment, comment.topic, comment.user, comment.topic.user) in enumerate(items):\n if comment.is_anon:\n comment.user = None\n if comment.topic.is_anon:\n comment.topic.user = None\n if comment.comment_id is not None:\n comment.reply_user = db.session.query(User).outerjoin(Comment, User.id == Comment.user_id) \\\n .filter(Comment.id == comment.comment_id).first()\n else:\n comment.reply_user = None\n if comment.topic.video_id is not None:\n comment.topic.video = Video.get_one(id=comment.topic.video_id)\n else:\n comment.topic.video = None\n\n comment.append('topic', 'user', 'reply_user')\n comment.topic.append('user', 'video')\n items[index] = comment\n\n return data" }, { "identifier": "CreateCommentValidator", "path": "app/validator/forms.py", "snippet": "class CreateCommentValidator(Form):\n content = StringField('内容', validators=[DataRequired(message='内容不能为空')])\n topic_id = StringField('话题标识', validators=[DataRequired(message='话题标识不能为空')])\n comment_id = StringField('父评论标识')\n is_anon = BooleanField('是否匿名')" }, { "identifier": "GetCommentListValidator", "path": "app/validator/forms.py", "snippet": "class GetCommentListValidator(PaginateValidator):\n topic_id = StringField('话题标识')\n user_id = StringField('用户标识')" } ]
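The RedPrint snippet in this row's context queues (func, rule, options) tuples and only registers them when register() is called, prefixing each rule with the red print's name. A small usage sketch; the blueprint name and '/v2' prefix are assumptions:

from flask import Blueprint, Flask
from app.lib.red_print import RedPrint

api = RedPrint('comment')

@api.route('/ping')
def ping():
    return 'pong'

bp = Blueprint('v2', __name__, url_prefix='/v2')  # name and prefix assumed
api.register(bp)   # rule becomes '/comment/ping', endpoint 'comment/ping'

app = Flask(__name__)
app.register_blueprint(bp)                        # final URL: /v2/comment/ping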
from flask import g

from app import db
from app.lib.enums import MessageCategory
from app.lib.exception import NotFound, Success, Deleted, Created
from app.lib.red_print import RedPrint
from app.lib.schema import paginator_schema
from app.lib.token import auth
from app.model.comment import Comment
from app.model.message import Message
from app.model.topic import Topic
from app.service.comment import create_comment_verify, get_comment_list
from app.validator.forms import CreateCommentValidator, GetCommentListValidator
3,240
# -*- coding: utf-8 -*-
"""
    :copyright: (c) 2023 by Jeffrey.
    :license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('comment')


@api.route('/', methods=['GET'])
def get_comments():
    """
    获取评论列表
    """
    form = GetCommentListValidator()
    topic_id = form.get_data('topic_id')
    user_id = form.get_data('user_id')

    comments = get_comment_list(topic_id=topic_id, user_id=user_id)
    return Success(data=paginator_schema(comments))


@api.route('/', methods=['POST'])
@auth.login_required
def create_comment():
    """
    发布评论
    """
    form = CreateCommentValidator()
    create_comment_verify(form=form)
    return Created()


@api.route('/<comment_id>', methods=['DELETE'])
@auth.login_required
def delete_comment(comment_id):
    """
    删除评论
    """
    comment = Comment.get_one(id=comment_id)
    if comment is None:
        raise NotFound(msg='评论不存在')

    topic = Topic.get_one(id=comment.topic_id)
# -*- coding: utf-8 -*-
"""
    :copyright: (c) 2023 by Jeffrey.
    :license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('comment')


@api.route('/', methods=['GET'])
def get_comments():
    """
    获取评论列表
    """
    form = GetCommentListValidator()
    topic_id = form.get_data('topic_id')
    user_id = form.get_data('user_id')

    comments = get_comment_list(topic_id=topic_id, user_id=user_id)
    return Success(data=paginator_schema(comments))


@api.route('/', methods=['POST'])
@auth.login_required
def create_comment():
    """
    发布评论
    """
    form = CreateCommentValidator()
    create_comment_verify(form=form)
    return Created()


@api.route('/<comment_id>', methods=['DELETE'])
@auth.login_required
def delete_comment(comment_id):
    """
    删除评论
    """
    comment = Comment.get_one(id=comment_id)
    if comment is None:
        raise NotFound(msg='评论不存在')

    topic = Topic.get_one(id=comment.topic_id)
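The crop ends on the topic lookup, and the gold next line below continues into a Message query. Purely for orientation, a hypothetical ending for a delete handler like this one, built on the soft delete that BaseModel.delete exposes; the ordering and policy here are guesses, not the repository's continuation:

# hypothetical tail (see next_line below for the real continuation)
comment.delete(soft=True)   # assumed to stamp delete_time instead of dropping the row
topic.update(comment_count=Comment.get_comment_count(topic_id=topic.id))
return Deleted()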
exist_msg = Message.get_one(category=MessageCategory.COMMENT, user_id=topic.user_id, action_user_id=g.user.id,
1
2023-12-30 04:08:35+00:00
4k
lchen1019/Image_Cropper
ISAT/widgets/canvas.py
[ { "identifier": "Polygon", "path": "ISAT/widgets/polygon.py", "snippet": "class Polygon(QtWidgets.QGraphicsPolygonItem):\n def __init__(self):\n super(Polygon, self).__init__(parent=None)\n self.line_width = 0\n self.hover_alpha = 150\n self.nohover_alpha = 80\n self.points = []\n self.vertexs = []\n self.category = ''\n self.group = 0\n self.iscrowd = 0\n self.note = ''\n\n self.rxmin, self.rxmax, self.rymin, self.rymax = 0, 0, 0, 0 # 用于绘画完成后,记录多边形的各边界,此处与points对应\n self.color = QtGui.QColor('#ff0000')\n self.is_drawing = True\n\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(QtGui.QBrush(self.color, QtCore.Qt.BrushStyle.FDiagPattern))\n\n self.setAcceptHoverEvents(True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)\n self.setZValue(1e5)\n\n def addPoint(self, point):\n print('addPoint')\n self.points.append(point)\n print(self.points)\n vertex = Vertex(self, self.color, 2)\n # 添加路径点\n self.scene().addItem(vertex)\n self.vertexs.append(vertex)\n vertex.setPos(point)\n\n def movePoint(self, index, point):\n if not 0 <= index < len(self.points):\n return\n self.points[index] = self.mapFromScene(point)\n\n self.redraw()\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n def removePoint(self, index):\n if not self.points:\n return\n self.points.pop(index)\n vertex = self.vertexs.pop(index)\n self.scene().removeItem(vertex)\n del vertex\n self.redraw()\n\n def delete(self):\n self.points.clear()\n while self.vertexs:\n vertex = self.vertexs.pop()\n self.scene().removeItem(vertex)\n del vertex\n\n def moveVertex(self, index, point):\n if not 0 <= index < len(self.vertexs):\n return\n vertex = self.vertexs[index]\n vertex.setEnabled(False)\n vertex.setPos(point)\n vertex.setEnabled(True)\n\n def itemChange(self, change: 'QGraphicsItem.GraphicsItemChange', value: typing.Any):\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged and not self.is_drawing: # 选中改变\n if self.isSelected():\n color = QtGui.QColor('#00A0FF')\n color.setAlpha(self.hover_alpha)\n self.setBrush(color)\n else:\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n self.scene().mainwindow.annos_dock_widget.set_selected(self) # 更新label面板\n\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange: # ItemPositionHasChanged\n bias = value\n l, t, b, r = self.boundingRect().left(), self.boundingRect().top(), self.boundingRect().bottom(), self.boundingRect().right()\n if l + bias.x() < 0: bias.setX(-l)\n if r + bias.x() > self.scene().width(): bias.setX(self.scene().width()-r)\n if t + bias.y() < 0: bias.setY(-t)\n if b + bias.y() > self.scene().height(): bias.setY(self.scene().height()-b)\n\n for index, point in enumerate(self.points):\n self.moveVertex(index, point+bias)\n\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n return super(Polygon, self).itemChange(change, value)\n\n def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not self.isSelected():\n self.color.setAlpha(self.hover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not 
self.isSelected():\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def mouseDoubleClickEvent(self, event: 'QGraphicsSceneMouseEvent'):\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n self.scene().mainwindow.category_edit_widget.polygon = self\n self.scene().mainwindow.category_edit_widget.load_cfg()\n self.scene().mainwindow.category_edit_widget.show()\n\n def redraw(self):\n if len(self.points) < 1:\n return\n xs = [p.x() for p in self.points]\n ys = [p.y() for p in self.points]\n self.rxmin, self.rymin, self.rxmax, self.rymax = min(xs), min(ys), max(xs), max(ys)\n self.setPolygon(QtGui.QPolygonF(self.points))\n\n def change_color(self, color):\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n for vertex in self.vertexs:\n vertex_color = self.color\n vertex_color.setAlpha(255)\n vertex.setPen(QtGui.QPen(vertex_color, self.line_width))\n vertex.setBrush(vertex_color)\n\n def set_drawed(self, category, group, iscrowd, note, color:QtGui.QColor, layer=None):\n self.is_drawing = False\n self.category = category\n if isinstance(group, str):\n group = 0 if group == '' else int(group)\n self.group = group\n self.iscrowd = iscrowd\n self.note = note\n\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n if layer is not None:\n self.setZValue(layer)\n for vertex in self.vertexs:\n vertex.setColor(color)\n\n def calculate_area(self):\n area = 0\n num_points = len(self.points)\n for i in range(num_points):\n p1 = self.points[i]\n p2 = self.points[(i + 1) % num_points]\n d = p1.x() * p2.y() - p2.x() * p1.y()\n area += d\n return abs(area) / 2\n\n def load_object(self, object):\n segmentation = object.segmentation\n for x, y in segmentation:\n point = QtCore.QPointF(x, y)\n self.addPoint(point)\n color = self.scene().mainwindow.category_color_dict.get(object.category, '#000000')\n self.set_drawed(object.category, object.group, object.iscrowd, object.note, QtGui.QColor(color), object.layer) # ...\n\n def to_object(self):\n if self.is_drawing:\n return None\n segmentation = []\n for point in self.points:\n point = point + self.pos()\n segmentation.append((round(point.x(), 2), round(point.y(), 2)))\n xmin = self.boundingRect().x() + self.pos().x()\n ymin = self.boundingRect().y() + self.pos().y()\n xmax = xmin + self.boundingRect().width()\n ymax = ymin + self.boundingRect().height()\n\n object = Object(self.category, group=self.group, segmentation=segmentation,\n area=self.calculate_area(), layer=self.zValue(), bbox=(xmin, ymin, xmax, ymax), iscrowd=self.iscrowd, note=self.note)\n return object" }, { "identifier": "Vertex", "path": "ISAT/widgets/polygon.py", "snippet": "class Vertex(QtWidgets.QGraphicsPathItem):\n def __init__(self, polygon, color, nohover_size=2):\n super(Vertex, self).__init__()\n self.polygon = polygon\n self.color = color\n self.color.setAlpha(255)\n self.nohover_size = nohover_size\n self.hover_size = self.nohover_size + 2\n self.line_width = 0\n\n self.nohover = QtGui.QPainterPath()\n self.nohover.addEllipse(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))\n self.hover = QtGui.QPainterPath()\n self.hover.addRect(QtCore.QRectF(-self.nohover_size//2, -self.nohover_size//2, self.nohover_size, self.nohover_size))\n\n self.setPath(self.nohover)\n 
self.setBrush(self.color)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)\n self.setAcceptHoverEvents(True)\n self.setZValue(1e5)\n\n def setColor(self, color):\n self.color = QtGui.QColor(color)\n self.color.setAlpha(255)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n\n def itemChange(self, change: 'QtWidgets.QGraphicsItem.GraphicsItemChange', value: typing.Any):\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged:\n self.scene().mainwindow.actionDelete.setEnabled(self.isSelected())\n if self.isSelected():\n selected_color = QtGui.QColor('#00A0FF')\n self.setBrush(selected_color)\n else:\n self.setBrush(self.color)\n\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange and self.isEnabled():\n # 限制顶点移动到图外\n if value.x() < 0:\n value.setX(0)\n if value.x() > self.scene().width()-1:\n value.setX(self.scene().width()-1)\n if value.y() < 0:\n value.setY(0)\n if value.y() > self.scene().height()-1:\n value.setY(self.scene().height()-1)\n index = self.polygon.vertexs.index(self)\n self.polygon.movePoint(index, value)\n\n return super(Vertex, self).itemChange(change, value)\n \n def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if self.scene().mode == STATUSMode.CREATE: # CREATE\n self.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))\n else: # EDIT, VIEW\n self.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.OpenHandCursor))\n if not self.isSelected():\n self.setBrush(QtGui.QColor(255, 255, 255, 255))\n self.setPath(self.hover)\n super(Vertex, self).hoverEnterEvent(event)\n\n def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.isSelected():\n self.setBrush(self.color)\n self.setPath(self.nohover)\n super(Vertex, self).hoverLeaveEvent(event)" }, { "identifier": "PromptPoint", "path": "ISAT/widgets/polygon.py", "snippet": "class PromptPoint(QtWidgets.QGraphicsPathItem):\n def __init__(self, pos, type=0):\n super(PromptPoint, self).__init__()\n self.color = QtGui.QColor('#0000FF') if type==0 else QtGui.QColor('#00FF00')\n self.color.setAlpha(255)\n self.painterpath = QtGui.QPainterPath()\n self.painterpath.addEllipse(\n QtCore.QRectF(-1, -1, 2, 2))\n self.setPath(self.painterpath)\n self.setBrush(self.color)\n self.setPen(QtGui.QPen(self.color, 3))\n self.setZValue(1e5)\n\n self.setPos(pos)" }, { "identifier": "STATUSMode", "path": "ISAT/configs.py", "snippet": "class STATUSMode(Enum):\n VIEW = 0\n CREATE = 1\n EDIT = 2" }, { "identifier": "CLICKMode", "path": "ISAT/configs.py", "snippet": "class CLICKMode(Enum):\n POSITIVE = 0\n NEGATIVE = 1" }, { "identifier": "DRAWMode", "path": "ISAT/configs.py", "snippet": "class DRAWMode(Enum):\n POLYGON = 0\n SEGMENTANYTHING = 1" }, { "identifier": "CONTOURMode", "path": "ISAT/configs.py", "snippet": "class CONTOURMode(Enum):\n SAVE_MAX_ONLY = 0 # 只保留最多顶点的mask(一般为最大面积)\n SAVE_EXTERNAL = 1 # 只保留外轮廓\n SAVE_ALL = 2 # 保留所有轮廓" } ]
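Polygon.calculate_area in the context above is the shoelace formula over the polygon's vertices. The same computation stated standalone, with a quick sanity check:

from typing import List, Tuple

def shoelace_area(points: List[Tuple[float, float]]) -> float:
    # sum signed cross-products of consecutive vertices, then halve
    area = 0.0
    n = len(points)
    for i in range(n):
        x1, y1 = points[i]
        x2, y2 = points[(i + 1) % n]
        area += x1 * y2 - x2 * y1
    return abs(area) / 2

assert shoelace_area([(0, 0), (1, 0), (1, 1), (0, 1)]) == 1.0  # unit square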
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtGui import QPen, QBrush, QColor
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QGraphicsScene, QGraphicsView, QGraphicsRectItem
from ISAT.widgets.polygon import Polygon, Vertex, PromptPoint
from ISAT.configs import STATUSMode, CLICKMode, DRAWMode, CONTOURMode
from PIL import Image
import numpy as np
import cv2
import time

# 拖动鼠标描点 (drag the mouse to place points)
3,307
# -*- coding: utf-8 -*-
# @Author : LG

class AnnotationScene(QtWidgets.QGraphicsScene):
    def __init__(self, mainwindow):
        super(AnnotationScene, self).__init__()
        self.mainwindow = mainwindow
        self.image_item: QtWidgets.QGraphicsPixmapItem = None
        self.image_data = None
        self.current_graph: QGraphicsRectItem = None
        self.mode = STATUSMode.VIEW
# -*- coding: utf-8 -*-
# @Author : LG

class AnnotationScene(QtWidgets.QGraphicsScene):
    def __init__(self, mainwindow):
        super(AnnotationScene, self).__init__()
        self.mainwindow = mainwindow
        self.image_item: QtWidgets.QGraphicsPixmapItem = None
        self.image_data = None
        self.current_graph: QGraphicsRectItem = None
        self.mode = STATUSMode.VIEW
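The scene starts in STATUSMode.VIEW, and the hover handlers in the context branch on the current mode to pick a cursor (crosshair while creating, open hand otherwise). The same dispatch written as a lookup table; a self-contained restatement for clarity, not code from the repo:

from PyQt5 import QtCore, QtGui
from ISAT.configs import STATUSMode

MODE_CURSORS = {
    STATUSMode.CREATE: QtCore.Qt.CursorShape.CrossCursor,    # drawing
    STATUSMode.EDIT: QtCore.Qt.CursorShape.OpenHandCursor,   # editing
    STATUSMode.VIEW: QtCore.Qt.CursorShape.OpenHandCursor,   # viewing
}

def cursor_for(mode: STATUSMode) -> QtGui.QCursor:
    return QtGui.QCursor(MODE_CURSORS[mode])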
self.click = CLICKMode.POSITIVE
4
2023-12-24 16:19:16+00:00
4k
farhad-dalirani/MultiObjectTracking-YOLO-NAS-DeepSORT
tracking_by_detection.py
[ { "identifier": "DrawingTrackingInfo", "path": "drawing_util.py", "snippet": "class DrawingTrackingInfo:\n\n def __init__(self):\n self.trajectory_len = 50\n self.max_color = 150\n self.tracks_id_colors = np.random.randint(low=0, high=255, size=(self.max_color, 3), dtype='uint8')\n self.tracks = {}\n\n def draw_tracking_info(self, image, bounding_boxes, tracking_ids):\n \"\"\"\n Create image of bounding boxes and tracking IDs on the input image.\n\n Parameters:\n - image: The input image (numpy array).\n - bounding_boxes: List of n * 4 bounding boxes in the format (x, y, width, height).\n - tracking_ids: List of tracking IDs corresponding to each bounding box.\n\n Returns:\n - None (displays the image with bounding boxes and tracking IDs).\n \"\"\"\n # Create a copy of image\n image_cp = np.copy(image)\n # Iterate through each bounding box and tracking ID\n for bbox, track_id in zip(bounding_boxes, tracking_ids):\n x1, y1, x2, y2 = bbox\n \n # Draw the bounding box on the image\n color_r, color_g, color_b = self.tracks_id_colors[track_id % self.max_color, :]\n color_r, color_g, color_b = int(color_r), int(color_g), int(color_b)\n color = tuple([color_r, color_g, color_b])\n cv.rectangle(image_cp, (x1, y1), (x2, y2), color, 2)\n\n # Keep record of previous position of each unique track id\n if track_id not in self.tracks:\n self.tracks[track_id] = [bbox]\n else:\n self.tracks[track_id].append(bbox)\n\n # Draw trajectory of tracked object in some of the last frames\n for bbox_i in self.tracks[track_id][-self.trajectory_len:]:\n circle_x, circle_y = (bbox_i[0] + bbox_i[2])//2, bbox_i[3]\n cv.circle(image_cp, (circle_x, circle_y), 3, color, 2) \n\n # Display the tracking ID near the bounding box\n text = f\"ID: {track_id}\"\n cv.putText(image_cp, text, (x1, y1 - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n return image_cp" }, { "identifier": "DeepSort", "path": "deep_sort_pytorch_master/deep_sort/deep_sort.py", "snippet": "class DeepSort(object):\n def __init__(self, model_path, model_config=None, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):\n self.min_confidence = min_confidence\n self.nms_max_overlap = nms_max_overlap\n\n if model_config is None:\n self.extractor = Extractor(model_path, use_cuda=use_cuda)\n ###\n # Changed from orginal repository\n ###\n #else:\n # self.extractor = FastReIDExtractor(model_config, model_path, use_cuda=use_cuda)\n\n max_cosine_distance = max_dist\n metric = NearestNeighborDistanceMetric(\"cosine\", max_cosine_distance, nn_budget)\n self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)\n\n def update(self, bbox_xywh, confidences, ori_img):\n self.height, self.width = ori_img.shape[:2]\n # generate detections\n features = self._get_features(bbox_xywh, ori_img)\n bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)\n detections = [Detection(bbox_tlwh[i], conf, features[i]) for i,conf in enumerate(confidences) if conf>self.min_confidence]\n\n # run on non-maximum supression\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = non_max_suppression(boxes, self.nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n\n # update tracker\n self.tracker.predict()\n self.tracker.update(detections)\n\n # output bbox identities\n outputs = []\n for track in self.tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 1:\n continue\n box = 
track.to_tlwh()\n x1,y1,x2,y2 = self._tlwh_to_xyxy(box)\n track_id = track.track_id\n outputs.append(np.array([x1,y1,x2,y2,track_id], dtype=np.int))\n if len(outputs) > 0:\n outputs = np.stack(outputs,axis=0)\n return outputs\n\n\n \"\"\"\n TODO:\n Convert bbox from xc_yc_w_h to xtl_ytl_w_h\n Thanks [email protected] for reporting this bug!\n \"\"\"\n @staticmethod\n def _xywh_to_tlwh(bbox_xywh):\n if isinstance(bbox_xywh, np.ndarray):\n bbox_tlwh = bbox_xywh.copy()\n elif isinstance(bbox_xywh, torch.Tensor):\n bbox_tlwh = bbox_xywh.clone()\n bbox_tlwh[:,0] = bbox_xywh[:,0] - bbox_xywh[:,2]/2.\n bbox_tlwh[:,1] = bbox_xywh[:,1] - bbox_xywh[:,3]/2.\n return bbox_tlwh\n\n\n def _xywh_to_xyxy(self, bbox_xywh):\n x,y,w,h = bbox_xywh\n x1 = max(int(x-w/2),0)\n x2 = min(int(x+w/2),self.width-1)\n y1 = max(int(y-h/2),0)\n y2 = min(int(y+h/2),self.height-1)\n return x1,y1,x2,y2\n\n def _tlwh_to_xyxy(self, bbox_tlwh):\n \"\"\"\n TODO:\n Convert bbox from xtl_ytl_w_h to xc_yc_w_h\n Thanks [email protected] for reporting this bug!\n \"\"\"\n x,y,w,h = bbox_tlwh\n x1 = max(int(x),0)\n x2 = min(int(x+w),self.width-1)\n y1 = max(int(y),0)\n y2 = min(int(y+h),self.height-1)\n return x1,y1,x2,y2\n\n def _xyxy_to_tlwh(self, bbox_xyxy):\n x1,y1,x2,y2 = bbox_xyxy\n\n t = x1\n l = y1\n w = int(x2-x1)\n h = int(y2-y1)\n return t,l,w,h\n \n def _get_features(self, bbox_xywh, ori_img):\n im_crops = []\n for box in bbox_xywh:\n x1,y1,x2,y2 = self._xywh_to_xyxy(box)\n im = ori_img[y1:y2,x1:x2]\n im_crops.append(im)\n if im_crops:\n features = self.extractor(im_crops)\n else:\n features = np.array([])\n return features" }, { "identifier": "ReadData", "path": "data_util.py", "snippet": "class ReadData:\n\n def __init__(self, input_type, input_image_dir=None, input_video_path=None):\n \"\"\"\n Initializes an instance of the class with the \n specified input type, image directory, and video path.\n \"\"\"\n if input_type != 'images' and input_type != 'video':\n raise ValueError('Input type is not correct!')\n\n if input_type == 'images':\n self.path_images = self.get_sorted_image_paths(path_img_dir=input_image_dir) \n self.data_generator = self.get_next_image_from_image_directory\n elif input_type == 'video':\n self.video_path = input_video_path\n self.data_generator = self.get_next_image_from_video\n \n def get_sorted_image_paths(self, path_img_dir):\n \"\"\"\n Retrieve and return a list of sorted full paths for image files in the specified directory.\n\n Parameters:\n - path_img_dir (str): The path of the directory containing image files.\n\n Returns:\n - list: A sorted list of full paths for image files in the directory.\n \"\"\"\n\n # Get a list of all files in the folder\n files = os.listdir(path_img_dir)\n\n # Filter only the files with image extensions (you can customize this list)\n image_extensions = ['.jpg', '.jpeg', '.png', '.gif']\n image_files = [file for file in files if any(file.lower().endswith(ext) for ext in image_extensions)]\n\n # Sort the image files by name\n sorted_image_files = sorted(image_files)\n\n # Create a list of full paths for the sorted image files\n sorted_image_paths = [os.path.join(path_img_dir, file) for file in sorted_image_files]\n\n return sorted_image_paths\n\n def get_next_image_from_image_directory(self):\n \"\"\"\n Generator function that yields images from the specified image directory.\n \"\"\"\n for path_img_i in self.path_images:\n img_i = cv.imread(filename=path_img_i)\n if len(img_i.shape) == 3 and img_i.shape[2] == 3: \n img_i = cv.cvtColor(img_i, cv.COLOR_BGR2RGB)\n 
yield img_i\n return\n \n def get_next_image_from_video(self):\n \"\"\"\n Generator function to read and yield consecutive frames from a video file.\n \"\"\"\n # Input video reader\n in_video = cv.VideoCapture(self.video_path)\n\n while True:\n ret, img_i = in_video.read()\n if ret == False:\n break\n if len(img_i.shape) == 3 and img_i.shape[2] == 3: \n img_i = cv.cvtColor(img_i, cv.COLOR_BGR2RGB)\n yield img_i\n \n in_video.release()\n return" } ]
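DeepSort._xywh_to_tlwh in the context shifts a center-format box to its top-left corner before building detections. A standalone version with one worked number:

import numpy as np

def xywh_to_tlwh(bbox_xywh: np.ndarray) -> np.ndarray:
    # (xc, yc, w, h) -> (x_top_left, y_top_left, w, h)
    bbox_tlwh = bbox_xywh.copy()
    bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
    bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
    return bbox_tlwh

# a 100x40 box centered at (50, 30) has its top-left corner at (0, 10)
print(xywh_to_tlwh(np.array([[50., 30., 100., 40.]])))  # [[  0.  10. 100.  40.]]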
import os
import torch
import cv2 as cv
from super_gradients.training import models
from drawing_util import DrawingTrackingInfo
from deep_sort_pytorch_master.deep_sort import DeepSort
from data_util import ReadData
from datetime import datetime
2,697
def tracking_by_detection(config):

    # Check if the output folder exists
    if not os.path.exists(config['output_folder']):
        # Create it
        os.makedirs(config['output_folder'])
        print(f"Folder '{config['output_folder']}' created.")

    # Select device
    if config['detector_device'] == 'cuda:0':
        device_detector = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    elif config['detector_device'] == 'cpu':
        device_detector = torch.device('cpu')
    else:
        raise ValueError('Requested device name is not correct!')
    print('Device: {}'.format(device_detector))

    # Object for reading data
    ds_object = ReadData(input_type=config['input_type'],
                         input_image_dir=config['images_folder'],
                         input_video_path=config['input_video_path'])
    ds_generator = ds_object.data_generator()

    # Load YOLO-NAS-Medium for object detection
    detector = models.get(model_name=config['detector_arch'],
                          pretrained_weights=config['pretrained_dataset']).to(device_detector)

    # Tracking info drawing object
def tracking_by_detection(config):

    # Check if the output folder exists
    if not os.path.exists(config['output_folder']):
        # Create it
        os.makedirs(config['output_folder'])
        print(f"Folder '{config['output_folder']}' created.")

    # Select device
    if config['detector_device'] == 'cuda:0':
        device_detector = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    elif config['detector_device'] == 'cpu':
        device_detector = torch.device('cpu')
    else:
        raise ValueError('Requested device name is not correct!')
    print('Device: {}'.format(device_detector))

    # Object for reading data
    ds_object = ReadData(input_type=config['input_type'],
                         input_image_dir=config['images_folder'],
                         input_video_path=config['input_video_path'])
    ds_generator = ds_object.data_generator()

    # Load YOLO-NAS-Medium for object detection
    detector = models.get(model_name=config['detector_arch'],
                          pretrained_weights=config['pretrained_dataset']).to(device_detector)

    # Tracking info drawing object
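The crop ends just before the per-frame loop, and the gold next line below instantiates the drawer. A hypothetical wiring of the pieces whose signatures appear in this row's context; the checkpoint path, box values, and loop shape are all assumptions:

import numpy as np
import torch
from drawing_util import DrawingTrackingInfo
from deep_sort_pytorch_master.deep_sort import DeepSort

draw_obj = DrawingTrackingInfo()
# the usual deep_sort_pytorch checkpoint location, assumed here
tracker = DeepSort(model_path='deep_sort_pytorch_master/deep_sort/deep/checkpoint/ckpt.t7',
                   use_cuda=torch.cuda.is_available())

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for one video frame
bbox_xywh = np.array([[320., 240., 80., 160.]])  # one detection, center format
confidences = np.array([0.9])

# update() returns rows of [x1, y1, x2, y2, track_id] (see context above); with
# n_init=3 a brand-new track is not yet confirmed, so outputs may be empty early on
outputs = tracker.update(bbox_xywh, confidences, frame)
if len(outputs) > 0:
    frame = draw_obj.draw_tracking_info(frame, outputs[:, :4], outputs[:, 4])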
draw_obj = DrawingTrackingInfo()
0
2023-12-26 15:22:02+00:00
4k
harvestingmoon/StableVisionBot
bot.py
[ { "identifier": "BackEnd", "path": "backend.py", "snippet": "class BackEnd:\n def __init__(self,model_id) -> None:\n self.model = None\n self.curr_picture = None \n self.final_img = None\n self.call = {1:False,2:False}\n self.model_id = (model_id if model_id else \"stabilityai/stable-diffusion-2\")\n def change_picture(self,array): # picture received from user is a byte array need to convert into image \n picture = io.BytesIO(array)\n image = Image.open(picture).convert(\"RGB\")\n self.curr_picture = image # store it temp \n def final_(self,img):\n self.final_img = img\n def get_final(self):\n return self.final_img\n def get_picture(self):\n return self.curr_picture\n def change_model(self,model):\n self.model = model\n def get_model(self):\n return self.model\n def get_call(self):\n return self.call\n def call_engine(self,type):\n model_id = self.model_id\n call = self.get_call()\n device = (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n if not call[type]:\n if True in list(call.values()):\n for k,v in call.items():\n if v == True:\n call[k] = False\n if type == 1:\n scheduler = DDIMScheduler.from_pretrained(model_id,subfolder = \"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id,scheduler= scheduler, torch_dtype = torch.float16)\n else:\n pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id,torch_dtype = torch.float16)\n pipe = pipe.to(device)\n self.model = pipe\n call[type] = True\n return self.get_model()" }, { "identifier": "post_process", "path": "backend.py", "snippet": "def post_process(image,to_doc = True):\n def resize_image(image, max_size):\n quality = 95\n while True:\n with io.BytesIO() as file:\n image.save(file, format='JPEG', quality=quality)\n size = file.tell() / 1024 # Size in KB\n if size <= max_size:\n break\n quality -= 5 # Decrease quality by 5. You can change it as needed.\n if quality < 0:\n raise Exception(\"Cannot reduce image size under the limit without losing too much quality.\")\n return image\n \n def enforce_ratio(image,max_ratio): # stick to 20; 1\n width, height = image.size\n ratio = width / height\n\n if ratio > max_ratio:\n new_width = height * max_ratio\n image = image.resize((int(new_width), height), Image.ANTIALIAS)\n elif ratio < 1 / max_ratio:\n new_height = width * max_ratio\n image = image.resize((width, int(new_height)), Image.ANTIALIAS)\n\n return image\n\n def limit_pixels(image, max_pixels):\n width, height = image.size\n current_pixels = width * height\n\n if current_pixels > max_pixels:\n # Calculate the scale factor\n scale_factor = (max_pixels / current_pixels) ** 0.5\n new_width = int(width * scale_factor)\n new_height = int(height * scale_factor)\n image = image.resize((new_width, new_height), Image.ANTIALIAS)\n\n return image\n\n def pil_to_file(image):\n file = io.BytesIO()\n if to_doc:\n image.save(file, format='PDF')\n else:\n image.save(file,format = 'JPG')\n file.seek(0)\n return file\n if not to_doc:\n image = resize_image(image, 9 * 1024)\n image = enforce_ratio(image,18)\n image = limit_pixels(image, 8000)\n image = pil_to_file(image)\n return image" } ]
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import (
    Application,
    CommandHandler,
    ContextTypes,
    ConversationHandler,
    MessageHandler,
    CallbackQueryHandler,
    filters,
    CallbackContext,
)
from backend import BackEnd, post_process
from PIL import Image
import numpy as np
import json
import logging
import yaml
import emoji
import asyncio
2,207
# Simple telegram bot that uses stable diffusion

''' Importing YAML'''
with open("config .yaml", "r") as f:
    config = yaml.safe_load(f)

model = config['model']
api_key = config['API_KEY']

''' States for bot'''
ONE,TWO,DOCUMENT,PHOTO = range(4)
START,T2IMG,T2IMG2,IMG2IMG,IMG2IMG2,OUTPUT= range(6)

''' User logging'''
logging.basicConfig(
    format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level = logging.INFO
)
logger = logging.getLogger(__name__)

''' Important pipeline for stable diffusion'''
engine = BackEnd(model)

''' Function for bot'''
async def startcommand(update,context):
    keyboard = [
        [
            InlineKeyboardButton("Text To Image", callback_data = str(ONE)),
            InlineKeyboardButton("Image Editing",callback_data = str(TWO))],
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
    \nby harvestingm00n \U0001F343\
    \n\n\nPlease select an option!",reply_markup = reply_markup)
    return START

async def info(update: Update, _:CallbackContext) -> None:
    await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
    \n\n Technical Info: \
    \n\n Model: Stable Diffusion v2.0 \U0001F3A8 \
    \n\n Pipeline: HuggingFace \U0001F917 \
    \n\n GPU: min. 6gb VRAM \
    ")

async def text_to_image(update: Update, _: CallbackContext) -> int:
    query = update.callback_query
    query.answer()
    await query.edit_message_text("Please input the text you want to convert to image \u2328\
    \nIf you are using this in a group chat please reply to the bot \
    \n\nNote: This may take a while...")
    return T2IMG

async def image_to_image(update: Update, _: CallbackContext) -> int:
    query = update.callback_query
    query.answer()
    await query.edit_message_text("Please input the image you want to edit \U0001F5BC\
    \n\nIf you are using this in a group chat please reply to the bot")
    return IMG2IMG

async def img2img(update: Update, context: CallbackContext) -> None:
    user_photo = await update.message.photo[-1].get_file()
    array = await user_photo.download_as_bytearray()
    engine.change_picture(array) # temporarily storing the photo there ( will always override no matter what)
    await update.message.reply_text("Please input the text you want to convert to image \u2328\
    \nIf you are using this in a group chat please reply to the bot \
    \n\nNote: This may take a while...")
    return IMG2IMG2

async def t2img(update: Update, context: CallbackContext) -> None:
    user_input = update.message.text
    logging.info("User of text:",user_input)
    pipe = engine.call_engine(1)
    await update.message.reply_text(emoji.emojize("Painting! This may take awhile... :paintbrush:"))
    images = pipe(prompt = user_input,num_inference_steps = 50).images[0]
    engine.final_(images)
    keyboard = [[InlineKeyboardButton("Send as Document",callback_data = str(DOCUMENT)), InlineKeyboardButton("Send as Photo",callback_data = str(PHOTO))]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    await update.message.reply_text("Please select an option! \
    \n\n Note: Sending as photo have lower quality",reply_markup = reply_markup)
    # await context.bot.send_document(chat_id=update.effective_chat.id,document = final_images ,filename ='photo.pdf', caption = f"Generated Image of {user_input}")
    # await context.bot.send_photo(chat_id=update.effective_chat.id,photo = final_images ,filename ='photo.jpg', caption = f"Generated Image of {user_input}")
    return OUTPUT
# Simple telegram bot that uses stable diffusion

''' Importing YAML'''
with open("config .yaml", "r") as f:
    config = yaml.safe_load(f)

model = config['model']
api_key = config['API_KEY']

''' States for bot'''
ONE,TWO,DOCUMENT,PHOTO = range(4)
START,T2IMG,T2IMG2,IMG2IMG,IMG2IMG2,OUTPUT= range(6)

''' User logging'''
logging.basicConfig(
    format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level = logging.INFO
)
logger = logging.getLogger(__name__)

''' Important pipeline for stable diffusion'''
engine = BackEnd(model)

''' Function for bot'''
async def startcommand(update,context):
    keyboard = [
        [
            InlineKeyboardButton("Text To Image", callback_data = str(ONE)),
            InlineKeyboardButton("Image Editing",callback_data = str(TWO))],
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
    \nby harvestingm00n \U0001F343\
    \n\n\nPlease select an option!",reply_markup = reply_markup)
    return START

async def info(update: Update, _:CallbackContext) -> None:
    await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
    \n\n Technical Info: \
    \n\n Model: Stable Diffusion v2.0 \U0001F3A8 \
    \n\n Pipeline: HuggingFace \U0001F917 \
    \n\n GPU: min. 6gb VRAM \
    ")

async def text_to_image(update: Update, _: CallbackContext) -> int:
    query = update.callback_query
    query.answer()
    await query.edit_message_text("Please input the text you want to convert to image \u2328\
    \nIf you are using this in a group chat please reply to the bot \
    \n\nNote: This may take a while...")
    return T2IMG

async def image_to_image(update: Update, _: CallbackContext) -> int:
    query = update.callback_query
    query.answer()
    await query.edit_message_text("Please input the image you want to edit \U0001F5BC\
    \n\nIf you are using this in a group chat please reply to the bot")
    return IMG2IMG

async def img2img(update: Update, context: CallbackContext) -> None:
    user_photo = await update.message.photo[-1].get_file()
    array = await user_photo.download_as_bytearray()
    engine.change_picture(array) # temporarily storing the photo there ( will always override no matter what)
    await update.message.reply_text("Please input the text you want to convert to image \u2328\
    \nIf you are using this in a group chat please reply to the bot \
    \n\nNote: This may take a while...")
    return IMG2IMG2

async def t2img(update: Update, context: CallbackContext) -> None:
    user_input = update.message.text
    logging.info("User of text:",user_input)
    pipe = engine.call_engine(1)
    await update.message.reply_text(emoji.emojize("Painting! This may take awhile... :paintbrush:"))
    images = pipe(prompt = user_input,num_inference_steps = 50).images[0]
    engine.final_(images)
    keyboard = [[InlineKeyboardButton("Send as Document",callback_data = str(DOCUMENT)), InlineKeyboardButton("Send as Photo",callback_data = str(PHOTO))]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    await update.message.reply_text("Please select an option! \
    \n\n Note: Sending as photo have lower quality",reply_markup = reply_markup)
    # await context.bot.send_document(chat_id=update.effective_chat.id,document = final_images ,filename ='photo.pdf', caption = f"Generated Image of {user_input}")
    # await context.bot.send_photo(chat_id=update.effective_chat.id,photo = final_images ,filename ='photo.jpg', caption = f"Generated Image of {user_input}")
    return OUTPUT

async def t2img2(update: Update, context: CallbackContext) -> None:
    user_input = update.message.text
    logging.info("User of text:",user_input)
    pipe = engine.call_engine(2)
    await update.message.reply_text(emoji.emojize("Painting! This may take awhile... :paintbrush:"))
    images = pipe(prompt = user_input,image = engine.get_picture()).images[0]
    engine.final_(images)
    keyboard = [[InlineKeyboardButton("Send as Document",callback_data = str(DOCUMENT)), InlineKeyboardButton("Send as Photo",callback_data = str(PHOTO))]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    await update.message.reply_text("Please select an option! \
    \n\n Note: Sending as photo have lower quality",reply_markup = reply_markup)
    # await context.bot.send_document(chat_id=update.effective_chat.id,document = final_images ,filename ='photo.pdf', caption = f"Generated Image of {user_input}")
    # await context.bot.send_photo(chat_id=update.effective_chat.id,photo = final_images ,filename ='photo.jpg', caption = f"Generated Image of {user_input}")
    return OUTPUT

async def document(update: Update, context: CallbackContext) -> None:
    query = update.callback_query
    final_image = engine.get_final()
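The conversation states above (START, T2IMG, IMG2IMG, OUTPUT, and so on) imply a ConversationHandler, but its wiring sits below this crop. A sketch of how such handlers are conventionally registered with python-telegram-bot's Application; every pattern and state mapping here is an assumption, not the bot's actual main():

def main() -> None:
    app = Application.builder().token(api_key).build()
    conv = ConversationHandler(
        entry_points=[CommandHandler("start", startcommand)],
        states={
            START: [CallbackQueryHandler(text_to_image, pattern=f"^{ONE}$"),
                    CallbackQueryHandler(image_to_image, pattern=f"^{TWO}$")],
            T2IMG: [MessageHandler(filters.TEXT & ~filters.COMMAND, t2img)],
            IMG2IMG: [MessageHandler(filters.PHOTO, img2img)],
            IMG2IMG2: [MessageHandler(filters.TEXT & ~filters.COMMAND, t2img2)],
            OUTPUT: [CallbackQueryHandler(document, pattern=f"^{DOCUMENT}$")],
        },
        fallbacks=[CommandHandler("start", startcommand)],
    )
    app.add_handler(conv)
    app.run_polling()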
final_image = post_process(final_image,to_doc = True)
1
2023-12-22 07:25:26+00:00
4k
khabbazan/Mattermost-Subscriptions
mattermostsub/schema.py
[ { "identifier": "UserCreate", "path": "apps/account/gql/mutations.py", "snippet": "class UserCreate(graphene.Mutation):\n \"\"\"\n UserCreate Mutation\n This mutation is used to create a new user. It involves creating a user in the Django system\n and additionally in an external system (Mattermost) through an admin proxy.\n \"\"\"\n\n class Arguments:\n username = graphene.Argument(graphene.String, required=True, description=\"Username for the new user account.\")\n password = graphene.Argument(graphene.String, required=True, description=\"Password for the new user account.\")\n email = graphene.Argument(graphene.String, required=True, description=\"Email address for the new user account.\")\n\n Output = ResponseBase\n\n def mutate(self, info, username, password, email):\n \"\"\"\n Mutate function for the UserCreate Mutation.\n Creates a new user in the database and Mattermost, returns a success response upon completion.\n \"\"\"\n with transaction.atomic():\n user = User.objects.create_user(username=username, email=email, password=password)\n matter_admin = MattermostAdminProxy()\n creation_status = matter_admin.create_user(user_data={\"username\": user.username, \"email\": user.email, \"password\": user.password[:30]})\n add_to_team_status = matter_admin.add_user_to_team(user_identifier=user.username)\n if not (creation_status and add_to_team_status):\n raise Exception(\"mattermost operation failed.\")\n\n return ResponseBase(status=http_code.HTTP_200_OK, status_code=http_code.HTTP_200_OK_CODE, message=\"User created successfully!\")" }, { "identifier": "UserGetToken", "path": "apps/account/gql/mutations.py", "snippet": "class UserGetToken(graphene.Mutation):\n \"\"\"\n UserGetToken Mutation\n This mutation is used to retrieve an authentication token for an existing user in the system.\n It validates the user credentials and returns a JWT token and refresh token upon successful authentication.\n \"\"\"\n\n class Arguments:\n username = graphene.Argument(graphene.String, required=True, description=\"Username of the user for authentication.\")\n password = graphene.Argument(graphene.String, required=True, description=\"Password of the user for authentication.\")\n\n Output = ResponseUnion\n\n def mutate(self, info, username, password):\n \"\"\"\n Mutate function for the UserGetToken Mutation.\n Authenticates a user and provides an authentication token and a refresh token if successful.\n \"\"\"\n user = authenticate(username=username, password=password)\n\n if user:\n return ResponseWithToken(\n status=http_code.HTTP_200_OK,\n status_code=http_code.HTTP_200_OK_CODE,\n message=\"Login Successfully!\",\n token=get_token(user),\n refresh_token=create_refresh_token(user),\n metadata={k: v for k, v in model_to_dict(user).items() if k in [\"id\", \"username\", \"email\"]},\n )\n else:\n return ResponseBase(\n status=http_code.HTTP_404_NOT_FOUND,\n status_code=http_code.HTTP_404_NOT_FOUND_CODE,\n message=\"Login Failed!\",\n )" }, { "identifier": "UserList", "path": "apps/account/gql/queries.py", "snippet": "class UserList(graphene.ObjectType):\n \"\"\"\n GraphQL ObjectType for listing users.\n This class provides the functionality to query a list of users with pagination support.\n \"\"\"\n\n user_list = graphene.Field(\n UserListType,\n page=graphene.Argument(PageType, description=\"Pagination details including page size and page number.\"),\n description=\"Query to retrieve a paginated list of users.\",\n )\n\n @login_required\n def resolve_user_list(root, info, **kwargs):\n \"\"\"\n 
Resolver for the user_list query.\n Retrieves a list of users based on pagination parameters. Requires user authentication.\n\n Args:\n root (Object): Root object, not used in this query.\n info (ResolveInfo): Information about the query.\n **kwargs: Keyword arguments containing pagination details.\n\n Returns:\n UserListType: A paginated list of users along with total page count and user count.\n \"\"\"\n\n result = User.objects.all()\n\n # Extracting pagination details from kwargs with default values\n page = kwargs.get(\"page\", {\"page_size\": 10, \"page_number\": 1})\n page_number = page.get(\"page_number\")\n page_size = page.get(\"page_size\")\n\n # Paginating the result set\n response = Paginator(result, page_size)\n page_count = math.ceil(result.count() / page_size) # Calculating the total number of pages\n count = result.count() # Total number of users\n\n # Constructing the UserListType with paginated data\n user_list = UserListType(data=response.page(page_number), page_count=page_count, count=count)\n\n return user_list" }, { "identifier": "ChannelCreate", "path": "apps/chat/gql/mutations.py", "snippet": "class ChannelCreate(graphene.Mutation):\n \"\"\"\n Mutation to create a new channel.\n This mutation is responsible for creating a new channel in the system and adding specified members to it.\n \"\"\"\n\n class Arguments:\n channel_name = graphene.Argument(graphene.String, required=True, description=\"Name of the channel to be created.\")\n members = graphene.Argument(graphene.List(graphene.String), required=True, description=\"List of usernames to be added to the channel.\")\n\n Output = ResponseBase\n\n @login_required\n def mutate(self, info, channel_name, members):\n \"\"\"\n Creates a new channel with the given name and adds the specified members to it.\n\n Args:\n info (ResolveInfo): Information about the mutation.\n channel_name (str): Name of the channel to create.\n members (list of str): Usernames of members to add to the channel.\n\n Returns:\n ResponseBase: The result of the channel creation operation.\n \"\"\"\n user = info.context.user\n member_users = [User.objects.filter(username__iexact=username).first() for username in members]\n\n if None in member_users:\n raise Exception(\"Members are not valid.\")\n\n matter_admin = MattermostAdminProxy()\n channel_id = matter_admin.create_join_channel(channel_name=channel_name)\n for us in member_users + [user]:\n matter_admin.add_user_to_channel(channel_identifier=channel_name, user_identifier=us.username)\n\n if channel_id:\n return ResponseBase(\n status=http_code.HTTP_200_OK,\n status_code=http_code.HTTP_200_OK_CODE,\n message=\"Channel created successfully!\",\n metadata={\"channel_id\": channel_id},\n )\n else:\n return ResponseBase(\n status=http_code.HTTP_400_BAD_REQUEST_CODE,\n status_code=http_code.HTTP_400_BAD_REQUEST_CODE,\n message=\"Channel creation failed!\",\n )" }, { "identifier": "TextMessageSend", "path": "apps/chat/gql/mutations.py", "snippet": "class TextMessageSend(graphene.Mutation):\n \"\"\"\n Mutation to send a text message to a channel.\n This mutation handles sending a message to a specified channel and broadcasts it to all channel members.\n \"\"\"\n\n class Arguments:\n channel_identifier = graphene.Argument(graphene.String, required=True, description=\"Identifier of the channel to send the message to.\")\n text_message = graphene.Argument(graphene.String, required=True, description=\"Text message to be sent.\")\n\n Output = ResponseBase\n\n @login_required\n def mutate(self, info, 
channel_identifier, text_message):\n \"\"\"\n Sends a text message to a specified channel.\n\n Args:\n info (ResolveInfo): Information about the mutation.\n channel_identifier (str): Identifier of the channel.\n text_message (str): The text message to be sent.\n\n Returns:\n ResponseBase: The result of the message sending operation.\n \"\"\"\n user = info.context.user\n matter_user = MattermostUserProxy(login_id=user.username, password=user.password[:30])\n response = matter_user.send_message(channel_identifier=channel_identifier, message=text_message)\n\n formatted_response = {\n \"id\": response.get(\"id\", None),\n \"message\": response.get(\"message\", None),\n \"create_at\": response.get(\"create_at\", None),\n \"username\": response.get(\"username\", None),\n \"type\": response.get(\"type\", None),\n }\n\n async_to_sync(OnNewChatMessage.new_chat_message)(channel_identifier=channel_identifier, message=formatted_response)\n\n channel_layer = get_channel_layer()\n\n async_to_sync(channel_layer.group_add)(\n channel_identifier,\n channel_identifier,\n )\n\n # Prepare the message\n message = {\"type\": \"chat_message\", \"message\": json.dumps({\"channel_identifier\": channel_identifier, \"message\": formatted_response})}\n\n # Broadcast the message to the group\n async_to_sync(channel_layer.group_send)(channel_identifier, message)\n\n if formatted_response.get(\"id\", None):\n return ResponseBase(\n status=http_code.HTTP_200_OK,\n status_code=http_code.HTTP_200_OK_CODE,\n message=\"Message send successfully!\",\n metadata={\"message_id\": formatted_response[\"id\"]},\n )\n else:\n return ResponseBase(\n status=http_code.HTTP_400_BAD_REQUEST_CODE,\n status_code=http_code.HTTP_400_BAD_REQUEST_CODE,\n message=\"Message send failed!\",\n )" }, { "identifier": "ChannelList", "path": "apps/chat/gql/queries.py", "snippet": "class ChannelList(graphene.ObjectType):\n \"\"\"\n GraphQL ObjectType for listing channels.\n This class provides the functionality to query a list of channels associated with a user with pagination support.\n \"\"\"\n\n channel_list = graphene.Field(\n ChannelListType,\n page=graphene.Argument(PageType, description=\"Pagination details including page size and page number.\"),\n description=\"Query to retrieve a paginated list of channels.\",\n )\n\n @login_required\n def resolve_channel_list(self, info, **kwargs):\n \"\"\"\n Resolver for the channel_list query.\n Retrieves a list of channels based on pagination parameters. 
Requires user authentication.\n\n Args:\n info (ResolveInfo): Information about the query.\n **kwargs: Keyword arguments containing pagination details.\n\n Returns:\n ChannelListType: A paginated list of channels with an indication of whether there are more pages.\n \"\"\"\n user = info.context.user\n page = kwargs.get(\"page\", {\"page_size\": 10, \"page_number\": 0})\n\n matter_user = MattermostUserProxy(login_id=user.username, password=user.password[:30])\n data, has_next = matter_user.list_related_channels(exclude_list=[], params={\"page\": page[\"page_number\"], \"per_page\": page[\"page_size\"]})\n\n channel_list = ChannelListType(data=data, has_next=has_next)\n\n return channel_list" }, { "identifier": "GetMessageList", "path": "apps/chat/gql/queries.py", "snippet": "class GetMessageList(graphene.ObjectType):\n \"\"\"\n GraphQL ObjectType for retrieving a list of messages.\n This class allows querying a list of messages from a specific channel with pagination support.\n \"\"\"\n\n get_message_list = graphene.Field(\n MessageListType,\n channel_identifier=graphene.Argument(graphene.String, required=True, description=\"Identifier of the channel to retrieve messages from.\"),\n page=graphene.Argument(PageType, description=\"Pagination details including page size and page number.\"),\n description=\"Query to retrieve a paginated list of messages from a specified channel.\",\n )\n\n @login_required\n def resolve_get_message_list(self, info, **kwargs):\n \"\"\"\n Resolver for the get_message_list query.\n Retrieves a list of messages from a specified channel based on pagination parameters. Requires user authentication.\n\n Args:\n info (ResolveInfo): Information about the query.\n **kwargs: Keyword arguments containing the channel identifier and pagination details.\n\n Returns:\n MessageListType: A paginated list of messages with indications of previous and next pages.\n \"\"\"\n user = info.context.user\n page = kwargs.get(\"page\", {\"page_size\": 10, \"page_number\": 0})\n channel_identifier = kwargs.get(\"channel_identifier\", None)\n\n matter_user = MattermostUserProxy(login_id=user.username, password=user.password[:30])\n data, has_prev, has_next = matter_user.get_messages(channel_identifier=channel_identifier, params={\"page\": page[\"page_number\"], \"per_page\": page[\"page_size\"]})\n sorted_data = sorted(data, key=lambda x: x[\"create_at\"])\n\n message_list = MessageListType(data=sorted_data, has_previous=has_prev, has_next=has_next)\n\n return message_list" }, { "identifier": "OnNewChatMessage", "path": "apps/chat/gql/subscriptions.py", "snippet": "class OnNewChatMessage(subscription.Subscription):\n \"\"\"\n GraphQL Subscription for new chat messages.\n This subscription allows clients to listen for new messages on a specified channel.\n \"\"\"\n\n channel_identifier = graphene.String()\n message = graphene.Field(MessageQueryType)\n\n class Arguments:\n channel_identifier = graphene.String(required=True, description=\"The identifier of the chat channel to subscribe to.\")\n\n @staticmethod\n def subscribe(root, info, channel_identifier):\n \"\"\"\n Called when a user subscribes to the subscription.\n\n Args:\n root (Object): Root object, not used in this subscription.\n info (ResolveInfo): Information about the subscription.\n channel_identifier (str): Identifier of the channel to subscribe to.\n\n Returns:\n list: A list containing the channel identifier.\n\n Raises:\n Exception: If the user is not authenticated.\n \"\"\"\n user = info.context.channels_scope[\"user\"]\n # 
Check if the user is authenticated\n if not user or not user.is_authenticated:\n # Reject the subscription if the user is not authenticated\n raise Exception(\"User is not authenticated.\")\n\n print(\"new user has subscribed via ws.\", channel_identifier)\n return [channel_identifier]\n\n def publish(self, info, channel_identifier=None):\n \"\"\"\n Called to prepare the subscription notification message.\n\n Args:\n info (ResolveInfo): Information about the subscription.\n channel_identifier (str): The identifier of the channel.\n\n Returns:\n OnNewChatMessage: The subscription object with the new message.\n \"\"\"\n # The `self` contains payload delivered from the `broadcast()`.\n new_msg_channel_identifier = self[\"channel_identifier\"]\n new_msg = self[\"message\"]\n\n # Ensure that the published message is for the subscribed channel\n assert channel_identifier is None or channel_identifier == new_msg_channel_identifier\n\n return OnNewChatMessage(channel_identifier=channel_identifier, message=new_msg)\n\n @classmethod\n async def new_chat_message(cls, channel_identifier, message):\n \"\"\"\n Auxiliary function to send subscription notifications.\n\n This method encapsulates the broadcast invocation, allowing the payload structure to be considered as an implementation detail.\n\n Args:\n channel_identifier (str): The identifier of the channel where the message is sent.\n message (dict): The message to be sent.\n \"\"\"\n await cls.broadcast(\n group=channel_identifier,\n payload={\"channel_identifier\": channel_identifier, \"message\": message},\n )" } ]
import graphene
import graphql_jwt
from apps.account.gql.mutations import UserCreate
from apps.account.gql.mutations import UserGetToken
from apps.account.gql.queries import UserList
from apps.chat.gql.mutations import ChannelCreate
from apps.chat.gql.mutations import TextMessageSend
from apps.chat.gql.queries import ChannelList
from apps.chat.gql.queries import GetMessageList
from apps.chat.gql.subscriptions import OnNewChatMessage
3,582
class Query(UserList, ChannelList, GetMessageList):
    pass


class Mutation(graphene.ObjectType):
class Query(UserList, ChannelList, GetMessageList):
    pass


class Mutation(graphene.ObjectType):
user_create = UserCreate.Field()
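The ground-truth next line above registers the first mutation field. As a hedged sketch, the schema module plausibly continues as below; every field other than user_create, the Subscription wiring, and the JWT helpers are assumptions inferred from the record's import list, not the dataset's ground truth.

class Mutation(graphene.ObjectType):
    user_create = UserCreate.Field()              # ground-truth next line
    user_get_token = UserGetToken.Field()         # assumed, mirrors the imports
    channel_create = ChannelCreate.Field()        # assumed
    text_message_send = TextMessageSend.Field()   # assumed
    verify_token = graphql_jwt.Verify.Field()     # assumed django-graphql-jwt helper
    refresh_token = graphql_jwt.Refresh.Field()   # assumed django-graphql-jwt helper


class Subscription(graphene.ObjectType):
    on_new_chat_message = OnNewChatMessage.Field()  # assumed wiring


schema = graphene.Schema(query=Query, mutation=Mutation, subscription=Subscription)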
0
2023-12-25 11:40:56+00:00
4k
Hatins/DEOE
modules/data/genx.py
[ { "identifier": "custom_collate_rnd", "path": "data/genx_utils/collate.py", "snippet": "def custom_collate_rnd(batch: Any):\n samples = batch\n # NOTE: We do not really need the worker id for map style datasets (rnd) but we still provide the id for consistency\n worker_info = torch.utils.data.get_worker_info()\n local_worker_id = 0 if worker_info is None else worker_info.id\n return {\n 'data': custom_collate(samples),\n 'worker_id': local_worker_id,\n }" }, { "identifier": "custom_collate_streaming", "path": "data/genx_utils/collate.py", "snippet": "def custom_collate_streaming(batch: Any):\n \"\"\"We assume that we receive a batch collected by a worker of our streaming datapipe\n \"\"\"\n samples = batch[0]\n worker_id = batch[1]\n assert isinstance(worker_id, int)\n return {\n 'data': custom_collate(samples),\n 'worker_id': worker_id,\n }" }, { "identifier": "build_random_access_dataset", "path": "data/genx_utils/dataset_rnd.py", "snippet": "def build_random_access_dataset(dataset_mode: DatasetMode, dataset_config: DictConfig) -> CustomConcatDataset:\n dataset_path = Path(dataset_config.path)\n assert dataset_path.is_dir(), f'{str(dataset_path)}'\n\n mode2str = {DatasetMode.TRAIN: 'train',\n DatasetMode.VALIDATION: 'val',\n DatasetMode.TESTING: 'test'}\n\n split_path = dataset_path / mode2str[dataset_mode]\n assert split_path.is_dir()\n\n seq_datasets = list()\n for entry in tqdm(split_path.iterdir(), desc=f'creating rnd access {mode2str[dataset_mode]} datasets'):\n seq_datasets.append(SequenceDataset(path=entry, dataset_mode=dataset_mode, dataset_config=dataset_config))\n\n return CustomConcatDataset(seq_datasets)" }, { "identifier": "get_weighted_random_sampler", "path": "data/genx_utils/dataset_rnd.py", "snippet": "def get_weighted_random_sampler(dataset: CustomConcatDataset) -> WeightedRandomSampler:\n class2count = dict()\n ClassAndCount = namedtuple('ClassAndCount', ['class_ids', 'counts'])\n classandcount_list = list()\n print('--- START generating weighted random sampler ---')\n dataset.only_load_labels()\n for idx, data in enumerate(tqdm(dataset, desc='iterate through dataset')):\n labels: SparselyBatchedObjectLabels = data[DataType.OBJLABELS_SEQ]\n label_list, valid_batch_indices = labels.get_valid_labels_and_batch_indices()\n class_ids_seq = list()\n for label in label_list:\n class_ids_numpy = np.asarray(label.class_id.numpy(), dtype='int32')\n class_ids_seq.append(class_ids_numpy)\n class_ids_seq, counts_seq = np.unique(np.concatenate(class_ids_seq), return_counts=True)\n for class_id, count in zip(class_ids_seq, counts_seq):\n class2count[class_id] = class2count.get(class_id, 0) + count\n classandcount_list.append(ClassAndCount(class_ids=class_ids_seq, counts=counts_seq))\n dataset.load_everything()\n\n class2weight = {}\n for class_id, count in class2count.items():\n count = max(count, 1)\n class2weight[class_id] = 1 / count\n\n weights = []\n for classandcount in classandcount_list:\n weight = 0\n for class_id, count in zip(classandcount.class_ids, classandcount.counts):\n # Not only weight depending on class but also depending on number of occurrences.\n # This will bias towards sampling \"frames\" with more bounding boxes.\n weight += class2weight[class_id] * count\n weights.append(weight)\n\n print('--- DONE generating weighted random sampler ---')\n return WeightedRandomSampler(weights=weights, num_samples=len(weights), replacement=True)" }, { "identifier": "CustomConcatDataset", "path": "data/genx_utils/dataset_rnd.py", "snippet": "class 
CustomConcatDataset(ConcatDataset):\n datasets: List[SequenceDataset]\n\n def __init__(self, datasets: Iterable[SequenceDataset]):\n super().__init__(datasets=datasets)\n\n def only_load_labels(self):\n for idx, dataset in enumerate(self.datasets):\n self.datasets[idx].only_load_labels()\n\n def load_everything(self):\n for idx, dataset in enumerate(self.datasets):\n self.datasets[idx].load_everything()" }, { "identifier": "build_streaming_dataset", "path": "data/genx_utils/dataset_streaming.py", "snippet": "def build_streaming_dataset(dataset_mode: DatasetMode, dataset_config: DictConfig, batch_size: int, num_workers: int) \\\n -> Union[ConcatStreamingDataPipe, ShardedStreamingDataPipe]:\n dataset_path = Path(dataset_config.path)\n assert dataset_path.is_dir(), f'{str(dataset_path)}'\n\n mode2str = {DatasetMode.TRAIN: 'train',\n DatasetMode.VALIDATION: 'val',\n DatasetMode.TESTING: 'test'}\n\n split_path = dataset_path / mode2str[dataset_mode]\n assert split_path.is_dir()\n datapipes = list()\n num_full_sequences = 0\n num_splits = 0\n num_split_sequences = 0\n guarantee_labels = dataset_mode == DatasetMode.TRAIN\n for entry in tqdm(split_path.iterdir(), desc=f'creating streaming {mode2str[dataset_mode]} datasets'):\n new_datapipes = get_sequences(path=entry, dataset_mode = dataset_mode, dataset_config=dataset_config, guarantee_labels=guarantee_labels)\n if len(new_datapipes) == 1:\n num_full_sequences += 1\n else:\n num_splits += 1\n num_split_sequences += len(new_datapipes)\n datapipes.extend(new_datapipes)\n print(f'{num_full_sequences=}\\n{num_splits=}\\n{num_split_sequences=}')\n\n if dataset_mode == DatasetMode.TRAIN:\n return build_streaming_train_dataset(\n datapipes=datapipes, dataset_config=dataset_config, batch_size=batch_size, num_workers=num_workers)\n elif dataset_mode in (DatasetMode.VALIDATION, DatasetMode.TESTING):\n return build_streaming_evaluation_dataset(datapipes=datapipes, batch_size=batch_size)\n else:\n raise NotImplementedError" }, { "identifier": "get_dataloading_hw", "path": "data/utils/spatial.py", "snippet": "def get_dataloading_hw(dataset_config: DictConfig):\n dataset_name = dataset_config.name\n hw = get_original_hw(dataset_type=_str_2_type[dataset_name])\n downsample_by_factor_2 = dataset_config.downsample_by_factor_2\n if downsample_by_factor_2:\n hw = tuple(x // 2 for x in hw)\n return hw" }, { "identifier": "DatasetMode", "path": "data/utils/types.py", "snippet": "class DatasetMode(Enum):\n TRAIN = auto()\n VALIDATION = auto()\n TESTING = auto()" }, { "identifier": "DatasetSamplingMode", "path": "data/utils/types.py", "snippet": "class DatasetSamplingMode(StrEnum):\n RANDOM = 'random'\n STREAM = 'stream'\n MIXED = 'mixed'" } ]
from functools import partial
from typing import Any, Dict, Optional, Union
from omegaconf import DictConfig
from torch.utils.data import DataLoader, Dataset
from data.genx_utils.collate import custom_collate_rnd, custom_collate_streaming
from data.genx_utils.dataset_rnd import build_random_access_dataset, get_weighted_random_sampler, CustomConcatDataset
from data.genx_utils.dataset_streaming import build_streaming_dataset
from data.utils.spatial import get_dataloading_hw
from data.utils.types import DatasetMode, DatasetSamplingMode
import math
import pytorch_lightning as pl
2,416
def get_dataloader_kwargs(dataset: Union[Dataset, CustomConcatDataset], sampling_mode: DatasetSamplingMode, dataset_mode: DatasetMode, dataset_config: DictConfig, batch_size: int, num_workers: int) -> Dict[str, Any]:
    if dataset_mode == DatasetMode.TRAIN:
        if sampling_mode == DatasetSamplingMode.STREAM:
            return dict(
                dataset=dataset,
                batch_size=None,
                shuffle=False,  # Done already in the streaming datapipe
                num_workers=num_workers,
                pin_memory=False,
                drop_last=False,  # Cannot be done with streaming datapipes
                collate_fn=custom_collate_streaming,
            )
        if sampling_mode == DatasetSamplingMode.RANDOM:
            use_weighted_rnd_sampling = dataset_config.train.random.weighted_sampling
            sampler = get_weighted_random_sampler(dataset) if use_weighted_rnd_sampling else None
            return dict(
                dataset=dataset,
                batch_size=batch_size,
                shuffle=sampler is None,
                sampler=sampler,
                num_workers=num_workers,
                pin_memory=False,
                drop_last=True,  # Maintain the same batch size for logging
                collate_fn=custom_collate_rnd,
            )
        raise NotImplementedError
    elif dataset_mode in (DatasetMode.VALIDATION, DatasetMode.TESTING):
        if sampling_mode == DatasetSamplingMode.STREAM:
            return dict(
                dataset=dataset,
                batch_size=None,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=False,
                drop_last=False,  # Cannot be done with streaming datapipes
                collate_fn=custom_collate_streaming,
            )
        if sampling_mode == DatasetSamplingMode.RANDOM:
            return dict(
                dataset=dataset,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=False,
                drop_last=True,  # Maintain the same batch size for logging
                collate_fn=custom_collate_rnd,
            )
        raise NotImplementedError
    raise NotImplementedError


class DataModule(pl.LightningDataModule):

    def __init__(self, dataset_config: DictConfig, num_workers_train: int, num_workers_eval: int, batch_size_train: int, batch_size_eval: int):
        super().__init__()
        assert num_workers_train >= 0
        assert num_workers_eval >= 0
        assert batch_size_train >= 1
        assert batch_size_eval >= 1
        self.dataset_config = dataset_config
        self.train_sampling_mode = dataset_config.train.sampling
        self.eval_sampling_mode = dataset_config.eval.sampling
        assert self.train_sampling_mode in iter(DatasetSamplingMode)
        assert self.eval_sampling_mode in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
        # In DDP all configs are per process/GPU (num_workers, batch_size, ...).
        self.overall_batch_size_train = batch_size_train
        self.overall_batch_size_eval = batch_size_eval
        self.overall_num_workers_train = num_workers_train
        self.overall_num_workers_eval = num_workers_eval
        if self.eval_sampling_mode == DatasetSamplingMode.STREAM:
            self.build_eval_dataset = partial(build_streaming_dataset, batch_size=self.overall_batch_size_eval, num_workers=self.overall_num_workers_eval)
        elif self.eval_sampling_mode == DatasetSamplingMode.RANDOM:
def get_dataloader_kwargs(dataset: Union[Dataset, CustomConcatDataset], sampling_mode: DatasetSamplingMode, dataset_mode: DatasetMode, dataset_config: DictConfig, batch_size: int, num_workers: int) -> Dict[str, Any]:
    if dataset_mode == DatasetMode.TRAIN:
        if sampling_mode == DatasetSamplingMode.STREAM:
            return dict(
                dataset=dataset,
                batch_size=None,
                shuffle=False,  # Done already in the streaming datapipe
                num_workers=num_workers,
                pin_memory=False,
                drop_last=False,  # Cannot be done with streaming datapipes
                collate_fn=custom_collate_streaming,
            )
        if sampling_mode == DatasetSamplingMode.RANDOM:
            use_weighted_rnd_sampling = dataset_config.train.random.weighted_sampling
            sampler = get_weighted_random_sampler(dataset) if use_weighted_rnd_sampling else None
            return dict(
                dataset=dataset,
                batch_size=batch_size,
                shuffle=sampler is None,
                sampler=sampler,
                num_workers=num_workers,
                pin_memory=False,
                drop_last=True,  # Maintain the same batch size for logging
                collate_fn=custom_collate_rnd,
            )
        raise NotImplementedError
    elif dataset_mode in (DatasetMode.VALIDATION, DatasetMode.TESTING):
        if sampling_mode == DatasetSamplingMode.STREAM:
            return dict(
                dataset=dataset,
                batch_size=None,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=False,
                drop_last=False,  # Cannot be done with streaming datapipes
                collate_fn=custom_collate_streaming,
            )
        if sampling_mode == DatasetSamplingMode.RANDOM:
            return dict(
                dataset=dataset,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=False,
                drop_last=True,  # Maintain the same batch size for logging
                collate_fn=custom_collate_rnd,
            )
        raise NotImplementedError
    raise NotImplementedError


class DataModule(pl.LightningDataModule):

    def __init__(self, dataset_config: DictConfig, num_workers_train: int, num_workers_eval: int, batch_size_train: int, batch_size_eval: int):
        super().__init__()
        assert num_workers_train >= 0
        assert num_workers_eval >= 0
        assert batch_size_train >= 1
        assert batch_size_eval >= 1
        self.dataset_config = dataset_config
        self.train_sampling_mode = dataset_config.train.sampling
        self.eval_sampling_mode = dataset_config.eval.sampling
        assert self.train_sampling_mode in iter(DatasetSamplingMode)
        assert self.eval_sampling_mode in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
        # In DDP all configs are per process/GPU (num_workers, batch_size, ...).
        self.overall_batch_size_train = batch_size_train
        self.overall_batch_size_eval = batch_size_eval
        self.overall_num_workers_train = num_workers_train
        self.overall_num_workers_eval = num_workers_eval
        if self.eval_sampling_mode == DatasetSamplingMode.STREAM:
            self.build_eval_dataset = partial(build_streaming_dataset, batch_size=self.overall_batch_size_eval, num_workers=self.overall_num_workers_eval)
        elif self.eval_sampling_mode == DatasetSamplingMode.RANDOM:
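The kwargs dict returned above is presumably unpacked straight into torch's DataLoader, which the record imports; a minimal consumption sketch, where my_dataset and cfg are hypothetical stand-ins rather than repo values:

from torch.utils.data import DataLoader

# my_dataset and cfg are placeholders for a real dataset and DictConfig.
train_loader = DataLoader(**get_dataloader_kwargs(
    dataset=my_dataset,
    sampling_mode=DatasetSamplingMode.RANDOM,
    dataset_mode=DatasetMode.TRAIN,
    dataset_config=cfg,
    batch_size=32,
    num_workers=4,
))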
self.build_eval_dataset = build_random_access_dataset
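The ground-truth next line above, together with the streaming branch before it, makes both dataset builders interchangeable to the caller: functools.partial pre-binds the streaming-only arguments so that each variant ends up callable as fn(dataset_mode, dataset_config). A minimal, self-contained illustration with toy names (not repo code):

from functools import partial

def build_streaming(dataset_mode, dataset_config, batch_size, num_workers):
    return f"stream[{dataset_mode}] bs={batch_size} workers={num_workers}"

def build_random_access(dataset_mode, dataset_config):
    return f"random[{dataset_mode}]"

# Pre-bind the extra arguments so both variants share one call signature.
build_eval = partial(build_streaming, batch_size=8, num_workers=4)
print(build_eval("val", {}))   # stream[val] bs=8 workers=4
build_eval = build_random_access
print(build_eval("val", {}))   # random[val]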
2
2023-12-29 04:04:34+00:00
4k
Enthusiasm23/primkit
src/primkit/utils/DataFetcher.py
[ { "identifier": "WebDriverUtility", "path": "src/primkit/utils/SiteSeleniumer.py", "snippet": "class WebDriverUtility:\n \"\"\"\n Utility class to interact with a website using Selenium WebDriver.\n \"\"\"\n\n def __init__(self, url, driver_path=CHROME_DRIVER_PATH, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Initializes a WebDriverUtility instance to manage a WebDriver session.\n\n This constructor sets up a Chrome WebDriver using the specified driver path and URL.\n It waits for the page to load within the given timeout period.\n\n Parameters:\n :param url (str): The URL to be accessed by the WebDriver.\n :param driver_path (str, optional): The file path to the ChromeDriver executable. If not provided,\n a default path defined by CHROME_DRIVER_PATH is used.\n :param timeout (int, optional): The maximum time in seconds to wait for the page to load.\n If not provided, a default timeout defined by DEFAULT_TIMEOUT is used.\n\n The driver is set to the `self.driver` attribute and can be accessed by instance methods.\n \"\"\"\n self.url = url\n self.driver_path = driver_path\n self.timeout = timeout\n self.driver = None\n\n def init_driver(self, return_driver=False):\n \"\"\"\n Initializes the WebDriver if it has not been initialized already.\n \"\"\"\n if not self.driver:\n self.driver = get_chrome_driver(driver_path=self.driver_path)\n self.driver.maximize_window()\n if return_driver:\n logging.warning(\"Direct access to the WebDriver is granted. This reduces encapsulation and abstraction.\")\n return self.driver\n\n def load_url(self):\n \"\"\"\n Navigates to the URL set during initialization after ensuring the driver is ready.\n If the URL is not valid, raises a ValueError.\n \"\"\"\n if not is_url(self.url):\n raise ValueError(f\"Invalid URL: {self.url}\")\n\n self.init_driver()\n self.driver.get(self.url)\n self.ensure_loaded(self.timeout)\n\n def ensure_loaded(self, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Waits for the page to load within the given timeout period.\n \"\"\"\n WebDriverWait(self.driver, timeout).until(\n lambda d: d.execute_script('return document.readyState') == 'complete'\n )\n\n @staticmethod\n def get_headers():\n \"\"\"\n # Call the function imported from primertools.utils.gather_system_details\n \"\"\"\n return get_system_headers()\n\n @staticmethod\n def format_cookies(cookies):\n \"\"\"\n Convert cookies from WebDriver format to requests format.\n\n :param cookies: A list of cookies from WebDriver.\n :return: A dictionary of cookies in requests format.\n \"\"\"\n return {cookie['name']: cookie['value'] for cookie in cookies}\n\n def get_cookies(self):\n \"\"\"\n Navigate to a URL and return the cookies found on the page.\n\n :return: 'cookies' in requests format.\n \"\"\"\n cookies = self.driver.get_cookies()\n\n return self.format_cookies(cookies)\n\n def get_token(self, token_name=XSRF_NAME):\n \"\"\"\n the specified token found on the page.\n\n :param token_name: The name of the token to retrieve (default is '_xsrf').\n :return: the specified 'token'.\n \"\"\"\n token = self.get_dynamic_token(token_name)\n\n return token\n\n def refresh_page(self):\n \"\"\"\n Refreshes the current page.\n \"\"\"\n self.driver.refresh()\n\n def ensure_element(self, locator, by=By.CSS_SELECTOR, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Wait for an element to appear and be visible on the page.\n\n :param locator: The locator of the element to wait for.\n :param by: The type of strategy to locate the element (default is By.CSS_SELECTOR).\n :param timeout: Maximum time in seconds to wait for the 
element to appear.\n :return: True if the element appears within the timeout, False otherwise.\n \"\"\"\n try:\n WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((by, locator)))\n return True\n except TimeoutException:\n return False\n\n def get_dynamic_token(self, token_name, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Retrieve a dynamic token from the page.\n\n :param token_name: The name of the token to retrieve.\n :param timeout: Maximum time in seconds to wait for the token to become available.\n :return: The value of the token if found, None otherwise.\n \"\"\"\n try:\n WebDriverWait(self.driver, timeout).until(\n EC.presence_of_element_located((By.NAME, token_name))\n )\n return self.driver.find_element(By.NAME, token_name).get_attribute('value')\n except Exception as e:\n logger.error(f\"Error retrieving token: {e}\")\n return None\n\n def get_page_source(self):\n \"\"\"\n Retrieves the source code of the current page loaded in the WebDriver.\n\n :return: A string representing the source code of the current page.\n \"\"\"\n return self.driver.page_source\n\n def scroll_to_element(self, element):\n \"\"\"\n Scrolls the browser window to an element.\n\n :param element: The WebElement to scroll to.\n \"\"\"\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", element)\n\n def is_driver_active(self):\n \"\"\"\n Checks if the WebDriver is still active.\n\n :return: True if the WebDriver session is still active, False if it has been closed.\n \"\"\"\n try:\n # Attempt to get the current URL. If the driver is closed, this will raise an exception.\n _ = self.driver.current_url\n return True\n except WebDriverException:\n return False\n\n def find_element(self, by, value):\n \"\"\"\n Finds an element on the page based on the provided locator.\n\n :param by: The method to locate the element (e.g., By.ID, By.CSS_SELECTOR).\n :param value: The value of the locator.\n :return: The found web element.\n \"\"\"\n return self.driver.find_element(by, value)\n\n def click_by_locator(self, by, value):\n \"\"\"\n Clicks an element on the page identified by a locator.\n\n :param by: The method to locate the element.\n :param value: The value of the locator.\n \"\"\"\n element = self.find_element(by, value)\n element.click()\n\n @staticmethod\n def click_element(element):\n \"\"\"\n Clicks a web element.\n\n :param element: The web element to click.\n \"\"\"\n element.click()\n\n def clear_by_locator(self, by, value):\n \"\"\"\n Clears the content of an input field identified by a locator.\n\n :param by: The method to locate the element.\n :param value: The value of the locator.\n \"\"\"\n element = self.find_element(by, value)\n element.clear()\n\n @staticmethod\n def clear_element(element):\n \"\"\"\n Clears the content of a web element.\n\n :param element: The web element to clear.\n \"\"\"\n element.clear()\n\n def input_by_locator(self, by, value, text):\n \"\"\"\n Inputs text into an element identified by a locator.\n\n :param by: The method to locate the element.\n :param value: The value of the locator.\n :param text: The text to input into the element.\n \"\"\"\n element = self.find_element(by, value)\n element.send_keys(text)\n\n @staticmethod\n def input_element(element, *args):\n \"\"\"\n Inputs text or key sequences into a web element.\n\n Usage Examples:\n - utility.input_element(element, \"Text to input\")\n - utility.input_element(element, Keys.SHIFT, Keys.SPACE)\n\n :param element: The web element where the text or key sequences will be input.\n :param 
args: The text or key sequences to input into the element.\n \"\"\"\n for arg in args:\n element.send_keys(arg)\n\n def input_values(self, locator_or_element, input_value, by=None):\n \"\"\"\n Inputs the given value into the element identified by the locator or directly into the provided element.\n\n Usage Examples:\n - Using a Locator Tuple:\n utility.input_values((By.CSS_SELECTOR, \"#inputElementId\"), \"Input Value\")\n utility.input_values((By.ID, \"inputElementId\"), \"Input Value\")\n\n - Using a Web Element:\n element = utility.find_element(By.CSS_SELECTOR, \"#inputElementId\")\n utility.input_values(element, \"Input Value\")\n\n - Using locator value with by parameter:\n utility.input_values(\"inputElementId\", \"Input Value\", by=By.ID)\n\n :param locator_or_element: Either a locator tuple, an element object, or a locator string.\n :param input_value: The value to input into the element.\n :param by: Optional; The type of the locator (By.CSS_SELECTOR, By.ID, etc.).\n Required if the first parameter is a locator string.\n :raises: Exception if the locator_or_element is not a valid WebElement or locator tuple.\n \"\"\"\n if isinstance(locator_or_element, tuple):\n # Check if the first element of the tuple is a valid By attribute\n if not (locator_or_element[0] in vars(By).values() and isinstance(locator_or_element[1], str)):\n raise ValueError(\"Locator tuple is not in the correct order (By method, locator value).\")\n element = self.find_element(*locator_or_element)\n elif by is not None:\n element = self.find_element(by, locator_or_element)\n elif isinstance(locator_or_element, WebElement):\n element = locator_or_element\n else:\n raise ValueError(\"The locator_or_element argument must be a locator tuple, WebElement, or string with 'by' parameter.\")\n\n # Perform actions on the located element\n self.click_element(element)\n self.clear_element(element)\n self.input_element(element, input_value)\n\n def get_element_attribute(self, locator, attribute, by=By.CSS_SELECTOR):\n \"\"\"\n Gets the specified attribute of an element.\n\n Usage Example:\n - utility.get_element_attribute(\"#myElement\", \"href\")\n\n :param locator: The locator of the element.\n :param attribute: The attribute to retrieve from the element.\n :param by: The method to locate the element (default is By.CSS_SELECTOR).\n :return: The value of the specified attribute, or None if the element is not found.\n \"\"\"\n try:\n element = self.driver.find_element(by, locator)\n return element.get_attribute(attribute)\n except Exception as e:\n logger.error(f'Error getting attribute from element: {e}')\n return None\n\n def select_dropdown_option(self, dropdown_selector, option_value):\n \"\"\"\n Selects an option from a dropdown select element based on the value attribute of the option.\n Raises an exception if the option_value is not found in the dropdown.\n\n :param dropdown_selector: The CSS selector for the dropdown select element.\n :param option_value: The value attribute of the option to be selected.\n :raises NoSuchElementException: If the option_value is not found in the dropdown options.\n \"\"\"\n # Find the dropdown select element\n dropdown_element = self.find_element(By.CSS_SELECTOR, dropdown_selector)\n\n # Create a Select object for the dropdown element\n select = Select(dropdown_element)\n\n # Check if the option value is present in the dropdown\n if not any(option.get_attribute('value') == option_value for option in select.options):\n available_options = [option.get_attribute('value') for option in 
select.options]\n raise NoSuchElementException(f\"The option value '{option_value}' was not found in the dropdown. \"\n f\"Available options are: {available_options}\")\n\n # Select the option by its value attribute\n select.select_by_value(option_value)\n\n def close(self):\n \"\"\"\n Closes the WebDriver session.\n \"\"\"\n if self.driver:\n self.driver.quit()\n self.driver = None" }, { "identifier": "get_site_data", "path": "src/primkit/utils/SiteRequester.py", "snippet": "def get_site_data(url):\n \"\"\"\n Fetch headers, cookies, and the CSRF token from a given URL.\n If the URL is not valid, raises a ValueError.\n\n Parameters:\n - url (str): The URL to fetch the data from.\n\n Returns:\n - dict: The headers obtained from the `get_headers` function.\n - dict: The cookies obtained from the response, formatted as a dictionary.\n - str or None: The value of the CSRF token, if found; otherwise, None.\n\n Raises:\n - requests.RequestException: If there is an error making the GET request.\n - ValueError: If the status code is not 200 or the CSRF token is not found.\n \"\"\"\n if not is_url(url):\n raise ValueError(f\"Invalid URL: {url}\")\n\n try:\n response = requests.get(url)\n if response.status_code != 200:\n raise ValueError(f\"Error fetching data from {url}: Status code {response.status_code}\")\n\n headers = get_headers()\n cookies = {cookie.name: cookie.value for cookie in response.cookies}\n\n soup = BeautifulSoup(response.content, 'html.parser')\n token_element = soup.find('input', {'name': XSRF_NAME})\n token = token_element['value'] if token_element else None\n\n if token is None:\n raise ValueError(f\"CSRF token not found at {url}\")\n\n return headers, cookies, token\n\n except requests.RequestException as e:\n raise requests.RequestException(f\"Error fetching data from {url}: {e}\")\n\n except Exception as e:\n raise Exception(f\"Unexpected error: {e}\")" }, { "identifier": "PRIMER_URL", "path": "src/primkit/config.py", "snippet": "PRIMER_URL = os.environ.get('PRIMER_URL', f\"{MFE_PRIMER}/muld\")" } ]
import logging
import requests
from selenium.common.exceptions import WebDriverException
from ..utils.SiteSeleniumer import WebDriverUtility
from ..utils.SiteRequester import get_site_data
from ..config import PRIMER_URL
3,493
logger = logging.getLogger(__name__)


def fetch_web_data(url=PRIMER_URL, method='requests'):
    """
    Fetches headers, cookies, and a token from a given URL using either requests or selenium.

    Parameters:
    - url (str): URL to fetch data from.
    - method (str): Method to use for fetching data ('requests' or 'selenium').

    Returns:
    - tuple: (headers, cookies, token) if successful, otherwise raises an error.
    """
    logger.info(f"Fetching web data from {url} using {method}")

    headers = {}
    cookies = {}
    token = None

    if method == 'requests':
        # requests fetching logic
        try:
            # (requests fetching implementation)
logger = logging.getLogger(__name__)


def fetch_web_data(url=PRIMER_URL, method='requests'):
    """
    Fetches headers, cookies, and a token from a given URL using either requests or selenium.

    Parameters:
    - url (str): URL to fetch data from.
    - method (str): Method to use for fetching data ('requests' or 'selenium').

    Returns:
    - tuple: (headers, cookies, token) if successful, otherwise raises an error.
    """
    logger.info(f"Fetching web data from {url} using {method}")

    headers = {}
    cookies = {}
    token = None

    if method == 'requests':
        # requests fetching logic
        try:
            # (requests fetching implementation)
headers, cookies, token = get_site_data(url)
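Only the get_site_data call above is confirmed by the record. As a hedged reconstruction (toy function name), the full body plausibly pairs it with a selenium branch built on the WebDriverUtility API shown in this record's context:

# Hedged sketch; the selenium branch and error path are assumptions.
def fetch_web_data_sketch(url, method='requests'):
    if method == 'requests':
        return get_site_data(url)  # returns (headers, cookies, token)
    if method == 'selenium':  # assumed branch, using the snippet's own API
        utility = WebDriverUtility(url)
        try:
            utility.load_url()
            return utility.get_headers(), utility.get_cookies(), utility.get_token()
        finally:
            utility.close()
    raise ValueError(f"Unsupported method: {method}")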
1
2023-12-25 14:12:46+00:00
4k
Wangyuhao06/2022-adhoc
src/env.py
[ { "identifier": "random_waypoint", "path": "pymobility/models/mobility.py", "snippet": "def random_waypoint(*args, **kwargs):\n return iter(RandomWaypoint(*args, **kwargs))" }, { "identifier": "Node", "path": "src/node.py", "snippet": "class Node(object):\n def __init__(self,id_node):\n super(Node, self).__init__()\n #multi-agent sys setting\n self.node_max=36\n self.act_range=self.node_max-1 #最大邻居范围\n # current agent-property setting\n self.id=id_node#该节点id\n # 1 - packets\n self.packets_ToSend_id=[]#该节点当前待传的包\n self.packets_id_list=[]#该节点至今为止保存过的包id\n \n self.sending_flag=0\n self.rec_flag=0\n \n self.trans_task_send=Queue(maxsize=1)#该节点当前传输的任务\n self.trans_taskID_rec=[]#该节点当前接收的任务\n # 2 - energy\n self.current_amp_send=0#节点当前发送增益--------动作\n #self.current_amp_receive=0#节点当前接收增益--------动作\n \n self.current_power_send=0#节点当前发送功率\n self.current_power_receive=0#节点当前接收功率\n self.power_list=[]#节点使用能量记录\n \n self.energy_consumption=0#截至现在能量消耗\n # 3 - freq\n self.current_freqB=[1]#当前选用频谱块--------动作\n self.freqB_list=[1]#频谱块历史\n # 4 - topology\n self.neibor_idlist=[]\n self.next_hop_id=-1#下一条节点id--------动作\n # 5 - observation\n #self.ob_send=[]\n \n # def observation_rec(self,send_node):\n # if len(self.ob_send)==0 or len(send_node.ob_send)==0 :\n # raise ValueError(\"send observation unfinished\")\n # self.ob_rec.append(self.ob_send[-1])\n # self.ob_rec.append(send_node.ob_send[-1])\n # return self.ob_rec\n \n \n def get_send_action(self,ob,action_space):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_send,self.current_freqB,self.next_hop_id\n \n def get_rec_action(self,ob):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_receive " }, { "identifier": "Packet", "path": "src/packet.py", "snippet": "class Packet(object):\n def __init__(self,id_packet,packet_size,ori_node_id,dst_node_id,time_start_0):\n super(Packet, self).__init__()\n self.id=id_packet\n self.size=packet_size\n #节点属性\n self.ori_node_id=ori_node_id\n self.cur_node_id=ori_node_id\n self.dst_node_id=dst_node_id\n self.node_list=[ori_node_id]\n #T-T属性\n self.cur_trans_task_id=-100\n self.in_TR=0\n self.trans_task_IDlist=[]\n #路由属性\n self.time_start=time_start_0\n self.time_use=0\n self.arrive_flag=0\n \n def packet_trans_update(self,trans_task):\n if trans_task.trans_property[2]!=self.id:\n raise ValueError('trans_task not matched')\n self.cur_trans_task_id=trans_task.id" }, { "identifier": "Trans_task", "path": "src/transtask.py", "snippet": "class Trans_task(object):\n def __init__(self,trans_id,node_send,node_rec,packet):\n self.id=trans_id\n self.trans_property=(node_send.id,node_rec.id,packet.id)#基本属性\n self.packsize=packet.size\n ####frequency block info####\n self.FreqB_occup=node_send.current_freqB #占用频谱块id\n ####SINR and Capacity####\n self.SNR_C=([],1)#Y(SNR,Capacity)-----------------[X(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...]\n ####time of trans####\n self.time_use=1#int(self.packsize/self.SNR_C[1])+1\n self.time_cnt=0\n self.finish_flag=0\n ####energy setting####\n self.energy_property = (node_send.current_amp_send,RECAMP)\n self.energy_consume=(node_send.current_amp_send*packet.size*PACKENERGY,RECAMP*packet.size*PACKENERGY)\n self.power_consume=(round(node_send.current_amp_send*packet.size*PACKENERGY/self.time_use,6),round(RECAMP*packet.size*PACKENERGY/self.time_use,6))\n \n def show_info(self):\n return self.trans_property[0],self.trans_property[1],self.trans_property[2]\n \n def Trans_task_update(self):\n if self.finish_flag:\n return 1\n if self.time_cnt>=self.time_use:\n self.finish_flag=1\n 
return 1\n elif self.time_cnt<self.time_use:\n self.time_cnt+=1\n return 0\n \n \n #trans_task=tuple([],{},(node_send_id,node_send_amp,node_rec_id,node_rec_amp,packet_id),0)\n #tuple:([占用频谱块id],{(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...},(基本属性:发送节点id,发送增益,接收节点id,接收增益,包id),完成标志位)" } ]
import random
import numpy as np
from math import log2, log10
from queue import Queue
from pymobility.models.mobility import random_waypoint
from src.node import Node
from src.packet import Packet
from src.parameter import *
from src.transtask import Trans_task
1,927
class Environment():
    # Initialize the environment
    def __init__(self):
        # Initial data - maximum number of nodes
        self.node_max = NODE_MAX
        self.node_space_size = NODE_MAX
        self.node_moving_area = MOV_AREA
        # Initialize the 2D plane
        self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)
        self.position = 0
        # Initialize a random adjacency matrix
        self.topology = np.zeros((self.node_space_size, self.node_space_size))
        self.topology[0:self.node_max, 0:self.node_max] = np.random.randint(0, 2, (self.node_max, self.node_max))
        for i in range(self.node_max):
            self.topology[i, i] = 1
            for j in range(self.node_max):
                # Build a bidirectional graph
                if self.topology[i, j] == 1:
                    self.topology[j, i] = 1
        # Initialize the node action space
        self.topology_actSpace = []
        # Initialize the frequency-block tuple ----- (0, []) means (occupied or not, [IDs of occupying trans tasks])
        self.freqB_list = ([], [], [], [], [], [], [], [], [], [])
        # ((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))
        self.freqB_use_history = ([], [], [], [], [], [], [], [], [], [])
        # Initialize the transmission-task lists
        self.trans_task_ID_inTR = []
        self.trans_task_list = []
        self.trans_task_cnt = 0  # ID counter
        # Initialize the packet lists
        self.amount_poisson_list = np.random.poisson(lam=LAMDA, size=MAX_TIME)  # packet counts
        self.size_normal_list = ((np.random.normal(0, 1, MAX_TIME * 2) * 16 + 16) // 8) * 8  # packet sizes
        self.pack_use_cnt = 0  # packet index counter
        self.packets_list = []  # packet list
        self.packets_live_id = []
        # Initialize the node lists
        self.node_list = []
        self.live_node_ID_list = []
        for i in range(self.node_max):
class Environment():
    # Initialize the environment
    def __init__(self):
        # Initial data - maximum number of nodes
        self.node_max = NODE_MAX
        self.node_space_size = NODE_MAX
        self.node_moving_area = MOV_AREA
        # Initialize the 2D plane
        self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)
        self.position = 0
        # Initialize a random adjacency matrix
        self.topology = np.zeros((self.node_space_size, self.node_space_size))
        self.topology[0:self.node_max, 0:self.node_max] = np.random.randint(0, 2, (self.node_max, self.node_max))
        for i in range(self.node_max):
            self.topology[i, i] = 1
            for j in range(self.node_max):
                # Build a bidirectional graph
                if self.topology[i, j] == 1:
                    self.topology[j, i] = 1
        # Initialize the node action space
        self.topology_actSpace = []
        # Initialize the frequency-block tuple ----- (0, []) means (occupied or not, [IDs of occupying trans tasks])
        self.freqB_list = ([], [], [], [], [], [], [], [], [], [])
        # ((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))
        self.freqB_use_history = ([], [], [], [], [], [], [], [], [], [])
        # Initialize the transmission-task lists
        self.trans_task_ID_inTR = []
        self.trans_task_list = []
        self.trans_task_cnt = 0  # ID counter
        # Initialize the packet lists
        self.amount_poisson_list = np.random.poisson(lam=LAMDA, size=MAX_TIME)  # packet counts
        self.size_normal_list = ((np.random.normal(0, 1, MAX_TIME * 2) * 16 + 16) // 8) * 8  # packet sizes
        self.pack_use_cnt = 0  # packet index counter
        self.packets_list = []  # packet list
        self.packets_live_id = []
        # Initialize the node lists
        self.node_list = []
        self.live_node_ID_list = []
        for i in range(self.node_max):
locals()['node_'+str(i)] = Node(i)
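The ground-truth next line above writes into locals(), which in CPython does not create a usable local variable inside a function: locals() returns a snapshot, and writes to it are discarded. A hedged, idiomatic equivalent, assuming the intent is for each Node to stay reachable through the lists initialized just above:

# Assumed intent: keep each Node reachable via the lists defined in __init__.
for i in range(self.node_max):
    node = Node(i)
    self.node_list.append(node)
    self.live_node_ID_list.append(node.id)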
1
2023-12-30 09:35:30+00:00
4k
alshubati99/BeamEye
detectionCode.py
[ { "identifier": "label_map_util", "path": "detectionElements/label_map_util.py", "snippet": "def create_category_index(categories):\ndef convert_label_map_to_categories(label_map,\n max_num_classes,\n use_display_name=True):\ndef load_labelmap(path):\n\tdef validate_label_map(lm):" }, { "identifier": "drawing_tools", "path": "detectionElements/drawing_tools.py", "snippet": "def convert_and_draw_box(image,\n y_min,\n x_min,\n y_max,\n x_max,\n color,\n thickness=4,\n display_str_list=(),\n labels=False,\n crowd=False,\n accuracy=False,\n ):\ndef draw_box(image,\n y_min,\n x_min,\n y_max,\n x_max,\n color,\n thickness=4,\n display_str_list=(),\n labels=False,\n crowd=False,\n accuracy=False,\n\n ):\ndef draw_boxes_on_image_array(image,\n boxes,\n classes,\n scores,\n category_index,\n min_score_thresh=.3,\n line_thickness=4,\n labels=False,\n crowd=False,\n accuracy=False,\n pedestrian_color='undefined',\n crowd_color='undefined',\n ):" } ]
import time
import numpy as np
import os
import tensorflow as tf
import cv2
import uiElements.sharedVariables as User
from shutil import copy2
from time import sleep
from detectionElements import label_map_util, drawing_tools
from detectionElements.resizeVideo import resize_video
1,667
def frames_to_video(fps, output_folder='videoOut//'):
    output_folder += "//"
    image_folder = 'videoFrames//'
    video_name = output_folder + 'video.avi'
    images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
    # print(images)
    frame = cv2.imread(os.path.join(image_folder, images[0]))
    height, width, layers = frame.shape
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    video = cv2.VideoWriter(video_name, fourcc, fps, (width, height))
    # print(video_name)
    for image in images:
        video.write(cv2.imread(os.path.join(image_folder, image)))
    copy2(video_name, video_name[:-4] + "_copy.avi")
    cv2.destroyAllWindows()
    video.release()
    User.frames_progress = 100
    User.finished = True
    User.output_video = video_name


def detect():
    User.finished = False
    while User.wait:
        sleep(2)
        print("waiting")
    else:
        print("Got Video")
    User.wait = True
    high_res = User.high_res
    if not high_res:
        resized_video = resize_video(User.input_video_path)
        if resized_video:
            cap = cv2.VideoCapture(resized_video)
            print("resized video")
            User.input_video_path = resized_video
            print(cap)
    else:
        cap = cv2.VideoCapture(User.input_video_path)
        print("didnt resize video")
        # User.input_video_path = None
        print(User.input_video_path)
        print(cap)
    with open("uiElements//userSettings.txt", "r", encoding="utf-8") as f:
        settings = [line.split(" ")[-1] for line in f.read().split("\n")]
    include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings
    output_path = output_path.replace("_SPACE_", " ")
    include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(
        include_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)
    # in settings {1: "blue", 2: "purple", 3: "red", 4: "orange", 5: "yellow", 6: "green"}
    color_dict = {1: "#0094FF", 2: "#FF00F6", 3: "red", 4: "#FF6A00", 5: "yellow", 6: "#26FF5C"}
    # {1: "red", 2: "purple", 3: "blue", 4: "DodgerBlue", 5: "DeepSkyBlue", 6: "#00FF0C"}
    pedestrian_color = color_dict[pedestrian_color]
    crowd_color = color_dict[crowd_color]
    frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
    fps = cap.get(cv2.CAP_PROP_FPS)
    video_frames_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_count = 0
    video_frame_count = 0
    pedestrian_count_second, crowd_count_second = [], []
    # pedestrian_count_frame, crowd_count_frame = 0, 0

    def begin():
        empty_frames_folder()
        nonlocal frame_count, video_frame_count, pedestrian_color, crowd_color, cap
        with detection_graph.as_default():
            with tf.compat.v1.Session(graph=detection_graph) as sess:
                frames_left = 100  # percent
                pedestrian_count_frame, crowd_count_frame = 0, 0
                increment_progress_bar = 0
                while True:
                    frame_count += 1
                    if increment_progress_bar >= 2.28 * video_frames_total / 100:
                        frames_left -= 2.28
                        User.frames_progress += 2
                        print(
                            f"Processed {100 - frames_left:.2f}% of frames, {frames_left:.2f}% left. Progress Bar: {User.frames_progress}")
                        increment_progress_bar = 0
                    # if frame_count == frame_rate+1:
                    success, image_np = cap.read()
                    if not success:
                        print('EOF')
                        break
                    # flatten image using numpy
                    image_np_expanded = np.expand_dims(image_np, axis=0)
                    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                    scores = detection_graph.get_tensor_by_name('detection_scores:0')
                    classes = detection_graph.get_tensor_by_name('detection_classes:0')
                    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                    (boxes, scores, classes, num_detections) = sess.run(
                        [boxes, scores, classes, num_detections],
                        feed_dict={image_tensor: image_np_expanded})
                    # Visualization of the results of a detection.
tf.compat.v1.disable_v2_behavior()
tf.TF_ENABLE_ONEDNN_OPTS = 0
# sys.path.insert(0, 'detectionElements')
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    tf.compat.v1.disable_v2_behavior()
    with tf.io.gfile.GFile('detectionElements/_detectionModel.pb', 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

NUM_CLASSES = 50
label_map = label_map_util.load_labelmap('detectionElements/person_label_map.pbtxt')
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


def save_frame(frame_number, frame):
    frame_name = "0" * (5 - len(str(frame_number))) + str(frame_number)
    cv2.imwrite(f"videoFrames//frame_{frame_name}.jpg", frame)


def empty_frames_folder():
    for frame in os.listdir("videoFrames"):
        os.remove(f"videoFrames//{frame}")


def frames_to_video(fps, output_folder='videoOut//'):
    output_folder += "//"
    image_folder = 'videoFrames//'
    video_name = output_folder + 'video.avi'
    images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
    # print(images)
    frame = cv2.imread(os.path.join(image_folder, images[0]))
    height, width, layers = frame.shape
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    video = cv2.VideoWriter(video_name, fourcc, fps, (width, height))
    # print(video_name)
    for image in images:
        video.write(cv2.imread(os.path.join(image_folder, image)))
    copy2(video_name, video_name[:-4] + "_copy.avi")
    cv2.destroyAllWindows()
    video.release()
    User.frames_progress = 100
    User.finished = True
    User.output_video = video_name


def detect():
    User.finished = False
    while User.wait:
        sleep(2)
        print("waiting")
    else:
        print("Got Video")
    User.wait = True
    high_res = User.high_res
    if not high_res:
        resized_video = resize_video(User.input_video_path)
        if resized_video:
            cap = cv2.VideoCapture(resized_video)
            print("resized video")
            User.input_video_path = resized_video
            print(cap)
    else:
        cap = cv2.VideoCapture(User.input_video_path)
        print("didnt resize video")
        # User.input_video_path = None
        print(User.input_video_path)
        print(cap)
    with open("uiElements//userSettings.txt", "r", encoding="utf-8") as f:
        settings = [line.split(" ")[-1] for line in f.read().split("\n")]
    include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings
    output_path = output_path.replace("_SPACE_", " ")
    include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(
        include_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)
    # in settings {1: "blue", 2: "purple", 3: "red", 4: "orange", 5: "yellow", 6: "green"}
    color_dict = {1: "#0094FF", 2: "#FF00F6", 3: "red", 4: "#FF6A00", 5: "yellow", 6: "#26FF5C"}
    # {1: "red", 2: "purple", 3: "blue", 4: "DodgerBlue", 5: "DeepSkyBlue", 6: "#00FF0C"}
    pedestrian_color = color_dict[pedestrian_color]
    crowd_color = color_dict[crowd_color]
    frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
    fps = cap.get(cv2.CAP_PROP_FPS)
    video_frames_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_count = 0
    video_frame_count = 0
    pedestrian_count_second, crowd_count_second = [], []
    # pedestrian_count_frame, crowd_count_frame = 0, 0

    def begin():
        empty_frames_folder()
        nonlocal frame_count, video_frame_count, pedestrian_color, crowd_color, cap
        with detection_graph.as_default():
            with tf.compat.v1.Session(graph=detection_graph) as sess:
                frames_left = 100  # percent
                pedestrian_count_frame, crowd_count_frame = 0, 0
                increment_progress_bar = 0
                while True:
                    frame_count += 1
                    if increment_progress_bar >= 2.28 * video_frames_total / 100:
                        frames_left -= 2.28
                        User.frames_progress += 2
                        print(
                            f"Processed {100 - frames_left:.2f}% of frames, {frames_left:.2f}% left. Progress Bar: {User.frames_progress}")
                        increment_progress_bar = 0
                    # if frame_count == frame_rate+1:
                    success, image_np = cap.read()
                    if not success:
                        print('EOF')
                        break
                    # flatten image using numpy
                    image_np_expanded = np.expand_dims(image_np, axis=0)
                    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                    scores = detection_graph.get_tensor_by_name('detection_scores:0')
                    classes = detection_graph.get_tensor_by_name('detection_classes:0')
                    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                    (boxes, scores, classes, num_detections) = sess.run(
                        [boxes, scores, classes, num_detections],
                        feed_dict={image_tensor: image_np_expanded})
                    # Visualization of the results of a detection.
_, tmp_pedestrian_count_frame, tmp_crowd_count_frame = drawing_tools.draw_boxes_on_image_array(
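The ground-truth next line opens the visualization call. A hypothetical completion follows, with arguments inferred from the draw_boxes_on_image_array signature in this record's context and the settings parsed earlier in detect(); every value here is an assumption, not the dataset's ground truth:

_, tmp_pedestrian_count_frame, tmp_crowd_count_frame = drawing_tools.draw_boxes_on_image_array(
    image_np,                              # frame read from the capture
    np.squeeze(boxes),                     # assumed: drop the batch dimension
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    labels=include_labels,                 # assumed: flags from userSettings.txt
    crowd=include_crowd,
    accuracy=include_accuracy,
    pedestrian_color=pedestrian_color,
    crowd_color=crowd_color,
)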
1
2023-12-26 18:39:25+00:00
4k
davidsvy/fractal_video
src/transform/build.py
[ { "identifier": "Transform_Contrastive", "path": "src/transform/contrastive.py", "snippet": "class Transform_Contrastive(nn.Module):\n\n def __init__(\n self, img_size, easy_k=False, randaugment_m=9, randaugment_n=2, n_steps=1,\n back=True, prob_perspective=0.0, prob_scale=0.0, prob_shift=0.0, prob_clone=0.0,\n prob_zoom=0.0, prob_shake=0.0,\n ):\n super(Transform_Contrastive, self).__init__()\n\n self.back = back\n\n self.mixup_q = Mixup_Background(\n img_size=img_size,\n n_steps=n_steps,\n prob_scale=prob_scale,\n prob_shift=prob_shift,\n prob_clone=prob_clone,\n )\n\n self.transform_q = Transform_Outer_Train(\n randaugment_m=randaugment_m,\n randaugment_n=randaugment_n,\n n_steps=n_steps,\n hor_flip=True,\n prob_blur=0.5,\n prob_perspective=prob_perspective,\n prob_shift=prob_shift,\n prob_zoom=prob_zoom,\n prob_shake=prob_shake,\n )\n\n if not easy_k:\n self.mixup_k = self.mixup_q\n self.transform_k = self.transform_q\n\n else:\n self.mixup_k = Mixup_Background(\n img_size=img_size,\n n_steps=n_steps,\n prob_scale=prob_scale,\n prob_shift=0,\n prob_clone=0,\n )\n\n self.transform_k = Transform_Outer_Train(\n randaugment_m=randaugment_m,\n randaugment_n=randaugment_n,\n n_steps=n_steps,\n hor_flip=True,\n prob_blur=0.5,\n prob_perspective=0,\n prob_shift=0,\n prob_zoom=0,\n prob_shake=0,\n )\n\n def temporal_reverse(self, video):\n # video -> [B, 2, C, T, H, W]\n if torch.rand(1).item() < 0.5:\n video = torch.flip(video, dims=(-3,))\n\n return video\n\n def forward(self, video, step):\n # video -> [B, 2, C, T, H, W]\n\n video = self.temporal_reverse(video)\n # video -> [B, 2, C, T, H, W]\n q, k = video[:, 0], video[:, 1]\n\n if self.back:\n q = self.mixup_q(q, step=step)\n k = self.mixup_k(k, step=step)\n\n q = self.transform_q(q, step=step)\n k = self.transform_k(k, step=step)\n\n return q, k" }, { "identifier": "Mixup_Background", "path": "src/transform/mixup.py", "snippet": "class Mixup_Background(nn.Module):\n\n def __init__(self, img_size, n_steps=1, prob_scale=0.0, prob_shift=0.0, prob_clone=0.0):\n super(Mixup_Background, self).__init__()\n\n self.transform_background_static = T.RandomResizedCrop(\n size=img_size,\n scale=(0.6, 1.0),\n interpolation=T.InterpolationMode.BICUBIC,\n )\n\n self.use_tr_back = prob_shift > 0\n self.transform_background = Transform_Background(\n prob=prob_shift,\n n_steps=n_steps,\n )\n\n self.transform_foreground = Transform_Foreground(\n prob_scale=prob_scale,\n prob_shift=prob_shift,\n prob_clone=prob_clone,\n n_steps=n_steps,\n )\n\n self.alpha_mixup = (0.25, 0.75)\n self.alpha_back = (0.25, 0.55)\n self.exp = (0.6, 1.0)\n\n self.prob_back_dynamic = 0.2\n self.prob_back_max = 0.05\n\n def sample_background(self, video):\n # video -> [B, C, T, H, W]\n B, _, T, *_ = video.shape\n device = video.device\n\n mask = torch.rand(B, device=device) < self.prob_back_dynamic\n B_dynamic = mask.sum().item()\n B_static = B - B_dynamic\n back_static, back_dynamic = None, None\n\n if B_static > 0:\n back_static = []\n for _ in range(2):\n idxs_batch = torch.randint(\n low=0, high=B, size=(B_static,), device=device)\n idxs_frame = torch.randint(\n low=0, high=T, size=(B_static,), device=device)\n # idxs_batch, idxs_frame -> [B_static]\n back_static.append(video[idxs_batch, :, idxs_frame])\n\n back_static = torch.amax(torch.stack(back_static, dim=0), dim=0)\n # back_static -> [B_static, C, H, W]\n\n if B_dynamic > 0:\n idxs_batch = torch.randint(\n low=0, high=B, size=(B_dynamic,), device=device)\n # idxs_batch -> [B_dynamic]\n\n time_min, time_max = 2, max(2, 
int(0.25 * T))\n time = random.randint(time_min, time_max)\n time_start = random.randint(0, T - time)\n \n idxs_time = torch.randint(-1, 2, size=(T,))\n idxs_time[0] = 0\n idxs_time = torch.clip(idxs_time.cumsum(dim=0), 0, time - 1)\n idxs_time = idxs_time + time_start\n # idxs_time -> [T]\n back_dynamic = video[idxs_batch][:, :, idxs_time]\n # back_dynamic -> [B_dynamic, C, T, H, W]\n\n return back_static, back_dynamic\n\n def mix_background(self, video, back_static, back_dynamic):\n \"\"\"Adds background frames sampled from the video batch itself.\n\n Inspired by:\n https://arxiv.org/pdf/2009.05769.pdf\n https://openaccess.thecvf.com/content/CVPR2022/papers/Ding_Motion-Aware_Contrastive_Video_Representation_Learning_via_Foreground-Background_Merging_CVPR_2022_paper.pdf\n \"\"\"\n # video -> [B, C, T, H, W]\n # back_static -> [B_static, C, T, H, W] or None\n # back_dynamic -> [B_dynamic, C, 1, H, W] or None\n B, device = video.shape[0], video.device\n\n if back_static is None:\n back = back_dynamic\n elif back_dynamic is None:\n back = back_static\n else:\n back = torch.cat([back_static, back_dynamic], axis=0)\n idxs_batch = torch.randperm(B, device=device)\n back = back[idxs_batch]\n\n mask_max = torch.rand(B, device=device) < self.prob_back_max\n B_add = B - mask_max.sum().item()\n\n video = torch.clip(video, 0, 1)\n back = torch.clip(back, 0, 1)\n\n if B_add > 0:\n size_alpha = (B_add, 1, 1, 1, 1)\n alpha = sample_uniform(self.alpha_back[0], self.alpha_back[1], size=size_alpha, device=device)\n\n video[:B_add] = ((1 - alpha) * video[:B_add] ** 0.75 + alpha * back[:B_add] ** 0.75)\n\n exp = sample_uniform(self.exp[0], self.exp[1], size=size_alpha, device=device)\n video[:B_add] = torch.clip(video[:B_add], 0, 1) ** exp\n\n if B_add < B:\n video[B_add:] = torch.maximum(video[B_add:], back[B_add:])\n\n return video\n\n def forward(self, video, step):\n \"\"\"Adds background frames sampled from the video batch itself.\n\n Inspired by:\n https://arxiv.org/pdf/2009.05769.pdf\n https://openaccess.thecvf.com/content/CVPR2022/papers/Ding_Motion-Aware_Contrastive_Video_Representation_Learning_via_Foreground-Background_Merging_CVPR_2022_paper.pdf\n \"\"\"\n # video -> [B, C, T, H, W]\n T = video.shape[2]\n #video = torch.clip(video, min=0, max=1) ** 0.75\n back_static, back_dynamic = self.sample_background(video)\n # back_static -> [B_static, C, H, W] or None\n # back_dynamic -> [B_dynamic, C, T, H, W] or None\n\n if back_static is not None:\n back_static = self.transform_background_static(back_static)\n\n if self.use_tr_back:\n back_static = self.transform_background(\n back_static, n_frames=T, step=step)\n # back_static -> [B_static, C, T, H, W]\n else:\n back_static = torch.repeat_interleave(\n back_static[:, :, None], repeats=T, dim=-3)\n # back_static -> [B_static, C, T, H, W]\n\n video = self.transform_foreground(video, step=step)\n\n video = self.mix_background(\n video=video, back_static=back_static, back_dynamic=back_dynamic)\n\n return video" }, { "identifier": "transform_inner_train", "path": "src/transform/compose.py", "snippet": "def transform_inner_train(crop_size=112, min_scale=0.5, interp='bicubic'):\n crop_tr = Tv.RandomResizedCrop(\n target_height=crop_size,\n target_width=crop_size,\n scale=(min_scale, 1),\n aspect_ratio=(3.0 / 4.0, 4.0 / 3.0),\n interpolation=interp,\n )\n \n transform = T.Compose([\n Tv.ConvertUint8ToFloat(),\n crop_tr,\n ])\n\n return transform" }, { "identifier": "transform_inner_val", "path": "src/transform/compose.py", "snippet": "def 
transform_inner_val(crop_size=112, resize=True, interp='bicubic', crop_inc=True):\n\n return T.Compose([\n Tv.ConvertUint8ToFloat(),\n Crop_Center(\n crop_size=crop_size,\n interpolation=interp,\n resize=resize,\n crop_inc=crop_inc,\n ),\n Tv.Normalize(\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n ),\n ])" }, { "identifier": "Transform_Outer_Train", "path": "src/transform/compose.py", "snippet": "class Transform_Outer_Train(nn.Module):\n\n def __init__(\n self, randaugment_m=9, randaugment_n=2, n_steps=1, hor_flip=True, prob_blur=0.5,\n prob_perspective=0.0, prob_shift=0.0, prob_zoom=0.0, prob_shake=0.0,\n ):\n super(Transform_Outer_Train, self).__init__()\n\n self.use_perspective = prob_perspective > 0\n self.transform_perspective = Random_Perspective(\n p=prob_perspective,\n distortion_scale=0.7,\n )\n\n self.use_camera = prob_shift > 0 or prob_zoom > 0 or prob_shake > 0\n self.transform_camera = Transform_Camera(\n prob_shift=prob_shift,\n prob_zoom=prob_zoom,\n prob_shake=prob_shake,\n n_steps=n_steps,\n )\n\n transform = []\n\n if hor_flip:\n transform.append(T.RandomHorizontalFlip())\n\n if randaugment_m > 0:\n transform += [\n Tv.Permute((1, 0, 2, 3)),\n Tv.RandAugment(\n magnitude=randaugment_m,\n num_layers=randaugment_n,\n transform_hparas={'fill': (0., 0., 0.)}\n ),\n Tv.Permute((1, 0, 2, 3)),\n ]\n\n if prob_blur > 0:\n transform.append(T.RandomApply([T.GaussianBlur(3)], p=prob_blur))\n\n transform.append(\n Tv.Normalize(\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n )\n )\n\n self.transform = T.Compose(transform)\n\n def forward(self, video, step):\n # video -> [B, C, T, H, W]\n\n # applied in parallel for all samples in batch\n if self.use_perspective:\n video = self.transform_perspective(video)\n\n if self.use_camera:\n video = self.transform_camera(video, step=step)\n\n # applied seqentially for all samples in batch\n video = torch.stack([self.transform(v) for v in video], dim=0)\n\n return video" } ]
from src.transform.contrastive import Transform_Contrastive from src.transform.mixup import Mixup_Background from src.transform.compose import ( transform_inner_train, transform_inner_val, Transform_Outer_Train, )
3,453
def transform_contrastive(config): n_steps = max(1, config.AUG.EPOCHS_CURRICULUM * config.STEPS_PER_EPOCH) transform = Transform_Contrastive( img_size=config.DATA.IMG_SIZE, easy_k=config.AUG.SSL_EASY_K, randaugment_m=config.AUG.AUTO_AUGMENT_M, randaugment_n=config.AUG.AUTO_AUGMENT_N, n_steps=n_steps, back=config.AUG.TYPE_MIXUP == 'back', prob_perspective=config.AUG.PROB_PERSPECTIVE, prob_scale=config.AUG.PROB_SCALE, prob_shift=config.AUG.PROB_SHIFT, prob_clone=config.AUG.PROB_CLONE, prob_zoom=config.AUG.PROB_ZOOM, prob_shake=config.AUG.PROB_SHAKE, ) return transform ########################################################################## ########################################################################## # OTHER ########################################################################## ########################################################################## def transform_inner(is_train, config): if is_train: transform = transform_inner_train( crop_size=config.DATA.IMG_SIZE, min_scale=config.AUG.MIN_SCALE, interp=config.AUG.INTERP, ) else:
def transform_contrastive(config): n_steps = max(1, config.AUG.EPOCHS_CURRICULUM * config.STEPS_PER_EPOCH) transform = Transform_Contrastive( img_size=config.DATA.IMG_SIZE, easy_k=config.AUG.SSL_EASY_K, randaugment_m=config.AUG.AUTO_AUGMENT_M, randaugment_n=config.AUG.AUTO_AUGMENT_N, n_steps=n_steps, back=config.AUG.TYPE_MIXUP == 'back', prob_perspective=config.AUG.PROB_PERSPECTIVE, prob_scale=config.AUG.PROB_SCALE, prob_shift=config.AUG.PROB_SHIFT, prob_clone=config.AUG.PROB_CLONE, prob_zoom=config.AUG.PROB_ZOOM, prob_shake=config.AUG.PROB_SHAKE, ) return transform ########################################################################## ########################################################################## # OTHER ########################################################################## ########################################################################## def transform_inner(is_train, config): if is_train: transform = transform_inner_train( crop_size=config.DATA.IMG_SIZE, min_scale=config.AUG.MIN_SCALE, interp=config.AUG.INTERP, ) else:
transform = transform_inner_val(
3
2023-12-27 19:43:45+00:00
4k
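Each record in this dump pairs a cropped_code prefix with a gold next_line continuation (here `transform = transform_inner_val(`), alongside the retrieved context snippets and a gold_snippet_index pointing at the snippet that defines the completed symbol. A minimal sketch of scoring a predicted completion against next_line; the whitespace-insensitive exact match below is an illustrative choice, not a metric the dump itself specifies:

def normalize(line: str) -> str:
    # collapse runs of whitespace so formatting-only differences are ignored
    return " ".join(line.split())

def exact_match(pred: str, gold: str) -> bool:
    return normalize(pred) == normalize(gold)

# gold continuation taken from this record's next_line field
gold = "transform = transform_inner_val("
assert exact_match("transform  =  transform_inner_val(", gold)
assert not exact_match("transform = transform_inner_train(", gold)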
camenduru/ELYZA-japanese-Llama-2-13b-instruct-demo-hf
app.py
[ { "identifier": "get_input_token_length", "path": "model_vllm.py", "snippet": "def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:\n prompt = get_prompt(message, chat_history, system_prompt)\n input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']\n return input_ids.shape[-1]" }, { "identifier": "run", "path": "model_vllm.py", "snippet": "async def run(\n message: str,\n chat_history: list[tuple[str, str]],\n system_prompt: str,\n max_new_tokens: int = 1024,\n temperature: float = 0.8,\n top_p: float = 0.95,\n top_k: int = 50,\n do_sample: bool = False,\n repetition_penalty: float = 1.2,\n stream: bool = False,\n) -> AsyncGenerator | str:\n request_id = random_uuid()\n prompt = get_prompt(message=message, chat_history=chat_history, system_prompt=system_prompt)\n\n if not do_sample:\n # greedy\n temperature = 0\n sampling_params = SamplingParams(\n max_tokens=max_new_tokens,\n temperature=temperature,\n top_p=top_p,\n top_k=top_k,\n repetition_penalty=repetition_penalty,\n )\n\n logger.info(f'queue: {request_id}')\n results_generator = engine.generate(\n prompt=prompt,\n sampling_params=sampling_params,\n request_id=request_id,\n )\n\n # Streaming case\n async def stream_results() -> AsyncGenerator:\n async for request_output in results_generator:\n yield ''.join([output.text for output in request_output.outputs])\n\n if stream:\n return stream_results()\n else:\n async for request_output in results_generator:\n pass\n return ''.join([output.text for output in request_output.outputs])" } ]
from datetime import datetime, timezone, timedelta from typing import AsyncGenerator from botocore.config import Config from model_vllm import get_input_token_length, run import os import time import uuid import asyncio import logging import textwrap import boto3 import gradio as gr import pandas as pd import torch
2,365
logging.basicConfig(encoding='utf-8', level=logging.ERROR) logger = logging.getLogger(__name__) JST = timezone(timedelta(hours=+9), 'JST') DEFAULT_SYSTEM_PROMPT = 'あなたは誠実で優秀な日本人のアシスタントです。' MAX_MAX_NEW_TOKENS = 2048 DEFAULT_MAX_NEW_TOKENS = 512 MAX_INPUT_TOKEN_LENGTH = 4000 TITLE = '# ELYZA-japanese-Llama-2-13b-instruct' DESCRIPTION = """ ## 概要 - [ELYZA-japanese-Llama-2-13b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b)は、[株式会社ELYZA](https://elyza.ai/) (以降「当社」と呼称) が[Llama2](https://ai.meta.com/llama/)をベースとして日本語能力を拡張するために事前学習を行ったモデルです。 - [ELYZA-japanese-Llama-2-13b-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b-instruct)は ELYZA-japanese-Llama-2-13b を弊社独自のinstruction tuning用データセットで事後学習したモデルです。 - 本デモではこのモデルが使われています。 - 詳細は[Blog記事](https://note.com/elyza/n/n5d42686b60b7)を参照してください。 - 本デモではこちらの[Llama-2 7B Chat](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat)のデモをベースにさせていただきました。 ## License - Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved. ## 免責事項 - 当社は、本デモについて、ユーザーの特定の目的に適合すること、期待する機能・正確性・有用性を有すること、出力データが完全性、正確性、有用性を有すること、ユーザーによる本サービスの利用がユーザーに適用のある法令等に適合すること、継続的に利用できること、及び不具合が生じないことについて、明示又は黙示を問わず何ら保証するものではありません。 - 当社は、本デモに関してユーザーが被った損害等につき、一切の責任を負わないものとし、ユーザーはあらかじめこれを承諾するものとします。 - 当社は、本デモを通じて、ユーザー又は第三者の個人情報を取得することを想定しておらず、ユーザーは、本デモに、ユーザー又は第三者の氏名その他の特定の個人を識別することができる情報等を入力等してはならないものとします。 - ユーザーは、当社が本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用することを許諾するものとします。 ## 本デモで入力・出力されたデータの記録・利用に関して - 本デモで入力・出力されたデータは当社にて記録させていただき、今後の本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用させていただく場合がございます。 ## We are hiring! - 当社 (株式会社ELYZA) に興味のある方、ぜひお話ししませんか? - 機械学習エンジニア・インターン募集: https://open.talentio.com/r/1/c/elyza/homes/2507 - カジュアル面談はこちら: https://chillout.elyza.ai/elyza-japanese-llama2-13b """ _format_example = lambda s: textwrap.dedent(s).strip() examples = list(map(_format_example, [ """ 「キムチプリン」という新商品を考えています。この商品に対する世間の意見として想像されるものを箇条書きで3つ教えて """, """ 「メタリック」から「気分上々」までが自然につながるように、あいだの単語を連想してください。 """, """ 自律神経や副交感神経が乱れている、とはどのような状態ですか?科学的に教えて """, """ 日本国内で観光に行きたいと思っています。東京、名古屋、大阪、京都、福岡の特徴を表にまとめてください。 列名は「都道府県」「おすすめスポット」「おすすめグルメ」にしてください。 """, """ 私の考えた創作料理について、想像して説明を書いてください。 1. トマトマット 2. 餃子風もやし炒め 3. おにぎりすぎ """, ])) if not torch.cuda.is_available(): DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>' try: s3 = boto3.client( 's3', aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'], region_name=os.environ['S3_REGION'], config=Config( connect_timeout=5, read_timeout=5, retries={ 'mode': 'standard', 'total_max_attempts': 3, }, ), ) except Exception: logger.exception('Failed to initialize S3 client') def clear_and_save_textbox(message: str) -> tuple[str, str]: return '', message def display_input(message: str, history: list[tuple[str, str]]) -> list[tuple[str, str]]: history.append((message, '')) return history def delete_prev_fn(history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]: try: message, _ = history.pop() except IndexError: message = '' return history, message or '' async def generate( message: str, history_with_input: list[tuple[str, str]], system_prompt: str, max_new_tokens: int, temperature: float, top_p: float, top_k: int, do_sample: bool, repetition_penalty: float, ) -> AsyncGenerator[list[tuple[str, str]], None]: if max_new_tokens > MAX_MAX_NEW_TOKENS: raise ValueError history = history_with_input[:-1]
logging.basicConfig(encoding='utf-8', level=logging.ERROR) logger = logging.getLogger(__name__) JST = timezone(timedelta(hours=+9), 'JST') DEFAULT_SYSTEM_PROMPT = 'あなたは誠実で優秀な日本人のアシスタントです。' MAX_MAX_NEW_TOKENS = 2048 DEFAULT_MAX_NEW_TOKENS = 512 MAX_INPUT_TOKEN_LENGTH = 4000 TITLE = '# ELYZA-japanese-Llama-2-13b-instruct' DESCRIPTION = """ ## 概要 - [ELYZA-japanese-Llama-2-13b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b)は、[株式会社ELYZA](https://elyza.ai/) (以降「当社」と呼称) が[Llama2](https://ai.meta.com/llama/)をベースとして日本語能力を拡張するために事前学習を行ったモデルです。 - [ELYZA-japanese-Llama-2-13b-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b-instruct)は ELYZA-japanese-Llama-2-13b を弊社独自のinstruction tuning用データセットで事後学習したモデルです。 - 本デモではこのモデルが使われています。 - 詳細は[Blog記事](https://note.com/elyza/n/n5d42686b60b7)を参照してください。 - 本デモではこちらの[Llama-2 7B Chat](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat)のデモをベースにさせていただきました。 ## License - Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved. ## 免責事項 - 当社は、本デモについて、ユーザーの特定の目的に適合すること、期待する機能・正確性・有用性を有すること、出力データが完全性、正確性、有用性を有すること、ユーザーによる本サービスの利用がユーザーに適用のある法令等に適合すること、継続的に利用できること、及び不具合が生じないことについて、明示又は黙示を問わず何ら保証するものではありません。 - 当社は、本デモに関してユーザーが被った損害等につき、一切の責任を負わないものとし、ユーザーはあらかじめこれを承諾するものとします。 - 当社は、本デモを通じて、ユーザー又は第三者の個人情報を取得することを想定しておらず、ユーザーは、本デモに、ユーザー又は第三者の氏名その他の特定の個人を識別することができる情報等を入力等してはならないものとします。 - ユーザーは、当社が本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用することを許諾するものとします。 ## 本デモで入力・出力されたデータの記録・利用に関して - 本デモで入力・出力されたデータは当社にて記録させていただき、今後の本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用させていただく場合がございます。 ## We are hiring! - 当社 (株式会社ELYZA) に興味のある方、ぜひお話ししませんか? - 機械学習エンジニア・インターン募集: https://open.talentio.com/r/1/c/elyza/homes/2507 - カジュアル面談はこちら: https://chillout.elyza.ai/elyza-japanese-llama2-13b """ _format_example = lambda s: textwrap.dedent(s).strip() examples = list(map(_format_example, [ """ 「キムチプリン」という新商品を考えています。この商品に対する世間の意見として想像されるものを箇条書きで3つ教えて """, """ 「メタリック」から「気分上々」までが自然につながるように、あいだの単語を連想してください。 """, """ 自律神経や副交感神経が乱れている、とはどのような状態ですか?科学的に教えて """, """ 日本国内で観光に行きたいと思っています。東京、名古屋、大阪、京都、福岡の特徴を表にまとめてください。 列名は「都道府県」「おすすめスポット」「おすすめグルメ」にしてください。 """, """ 私の考えた創作料理について、想像して説明を書いてください。 1. トマトマット 2. 餃子風もやし炒め 3. おにぎりすぎ """, ])) if not torch.cuda.is_available(): DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>' try: s3 = boto3.client( 's3', aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'], region_name=os.environ['S3_REGION'], config=Config( connect_timeout=5, read_timeout=5, retries={ 'mode': 'standard', 'total_max_attempts': 3, }, ), ) except Exception: logger.exception('Failed to initialize S3 client') def clear_and_save_textbox(message: str) -> tuple[str, str]: return '', message def display_input(message: str, history: list[tuple[str, str]]) -> list[tuple[str, str]]: history.append((message, '')) return history def delete_prev_fn(history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]: try: message, _ = history.pop() except IndexError: message = '' return history, message or '' async def generate( message: str, history_with_input: list[tuple[str, str]], system_prompt: str, max_new_tokens: int, temperature: float, top_p: float, top_k: int, do_sample: bool, repetition_penalty: float, ) -> AsyncGenerator[list[tuple[str, str]], None]: if max_new_tokens > MAX_MAX_NEW_TOKENS: raise ValueError history = history_with_input[:-1]
stream = await run(
1
2023-12-27 02:51:16+00:00
4k
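The gold continuation here (`stream = await run(`) exercises the streaming branch of the record's `run` coroutine, which returns an async generator when stream=True. A self-contained sketch of that awaitable-returning-a-generator pattern; the toy run below is a stand-in for illustration, not the vllm-backed implementation from the record:

import asyncio
from typing import AsyncGenerator

async def run(message: str, stream: bool = True):
    async def gen() -> AsyncGenerator[str, None]:
        partial = ""
        for token in message.split():
            partial = (partial + " " + token).strip()
            yield partial  # emit the text accumulated so far, as the demo does

    if stream:
        return gen()  # caller awaits run() to obtain the generator itself
    text = ""
    async for chunk in gen():
        text = chunk  # keep only the final accumulated string
    return text

async def main() -> None:
    stream = await run("hello from the sketch")
    async for text in stream:
        print(text)

asyncio.run(main())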
camenduru/MotionCtrl-hf
lvdm/models/autoencoder.py
[ { "identifier": "DiagonalGaussianDistribution", "path": "lvdm/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self, noise=None):\n if noise is None:\n noise = torch.randn(self.mean.shape)\n \n x = self.mean + self.std * noise.to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "Decoder", "path": "lvdm/modules/networks/ae_modules.py", "snippet": "class Decoder(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,\n attn_type=\"vanilla\", **ignorekwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.give_pre_end = give_pre_end\n self.tanh_out = tanh_out\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"AE working on z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n up = nn.Module()\n up.block = block\n up.attn = attn\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, 
up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # print(f'decoder-input={z.shape}')\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n # print(f'decoder-conv in feat={h.shape}')\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n # print(f'decoder-mid feat={h.shape}')\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n # print(f'decoder-up feat={h.shape}')\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n # print(f'decoder-upsample feat={h.shape}')\n\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n # print(f'decoder-conv_out feat={h.shape}')\n if self.tanh_out:\n h = torch.tanh(h)\n return h" }, { "identifier": "Encoder", "path": "lvdm/modules/networks/ae_modules.py", "snippet": "class Encoder(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, double_z=True, use_linear_attn=False, attn_type=\"vanilla\",\n **ignore_kwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n\n # downsampling\n self.conv_in = torch.nn.Conv2d(in_channels,\n self.ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n curr_res = resolution\n in_ch_mult = (1,)+tuple(ch_mult)\n self.in_ch_mult = in_ch_mult\n self.down = nn.ModuleList()\n for i_level in range(self.num_resolutions):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_in = ch*in_ch_mult[i_level]\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n down = nn.Module()\n down.block = block\n down.attn = attn\n if i_level != self.num_resolutions-1:\n down.downsample = Downsample(block_in, resamp_with_conv)\n curr_res = curr_res // 2\n self.down.append(down)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n 2*z_channels if double_z else z_channels,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, x):\n # timestep embedding\n temb = None\n\n # print(f'encoder-input={x.shape}')\n # downsampling\n hs = [self.conv_in(x)]\n # print(f'encoder-conv in feat={hs[0].shape}')\n for i_level in range(self.num_resolutions):\n for i_block in range(self.num_res_blocks):\n h = 
self.down[i_level].block[i_block](hs[-1], temb)\n # print(f'encoder-down feat={h.shape}')\n if len(self.down[i_level].attn) > 0:\n h = self.down[i_level].attn[i_block](h)\n hs.append(h)\n if i_level != self.num_resolutions-1:\n # print(f'encoder-downsample (input)={hs[-1].shape}')\n hs.append(self.down[i_level].downsample(hs[-1]))\n # print(f'encoder-downsample (output)={hs[-1].shape}')\n\n # middle\n h = hs[-1]\n h = self.mid.block_1(h, temb)\n # print(f'encoder-mid1 feat={h.shape}')\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n # print(f'encoder-mid2 feat={h.shape}')\n\n # end\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n # print(f'end feat={h.shape}')\n return h" }, { "identifier": "instantiate_from_config", "path": "utils/utils.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" } ]
import os import numpy as np import pytorch_lightning as pl import torch import torch.nn.functional as F from contextlib import contextmanager from einops import rearrange from lvdm.distributions import DiagonalGaussianDistribution from lvdm.modules.networks.ae_modules import Decoder, Encoder from utils.utils import instantiate_from_config
2,877
class AutoencoderKL(pl.LightningModule): def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, test=False, logdir=None, input_dim=4, test_args=None, ): super().__init__() self.image_key = image_key
class AutoencoderKL(pl.LightningModule): def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, test=False, logdir=None, input_dim=4, test_args=None, ): super().__init__() self.image_key = image_key
self.encoder = Encoder(**ddconfig)
2
2023-12-27 19:32:03+00:00
4k
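The DiagonalGaussianDistribution snippet in this record halves the encoder output along the channel dimension into mean and log-variance, samples via the reparameterization trick, and has a closed-form KL against a standard normal. The same arithmetic on toy tensors (all shapes here are illustrative only):

import torch

params = torch.randn(2, 8, 4, 4)              # stand-in encoder output: 2*z_channels on dim 1
mean, logvar = torch.chunk(params, 2, dim=1)  # split into mean and log-variance
logvar = torch.clamp(logvar, -30.0, 20.0)     # same clamp range as the snippet
std = torch.exp(0.5 * logvar)
z = mean + std * torch.randn_like(std)        # reparameterized sample
# KL(q || N(0, I)) per sample, matching kl() with other=None
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
print(z.shape, kl.shape)                      # [2, 4, 4, 4] and [2]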
bitstuffing/pychat
core/bing.py
[ { "identifier": "Browser", "path": "core/browser.py", "snippet": "class Browser():\n\n USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0'\n USER_AGENT_EDGE = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0\"\n STEAM_BUFFER_SIZE = 512\n\n headers = {\n 'User-Agent': USER_AGENT\n }\n\n def __init__(self):\n self.session = requests.Session()\n\n def getInternetIpAddress(self):\n return requests.get('https://api.ipify.org').text\n \n def getTimeStamp(self):\n # format current time in \"2023-12-02T23:27:26+01:00\" format\n return datetime.datetime.now().astimezone().isoformat()\n \n def solveCaptchaChrome(self, captchaUrl):\n from selenium import webdriver\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import WebDriverWait\n from selenium.webdriver.support import expected_conditions as EC\n import json\n\n options = webdriver.ChromeOptions()\n #options.add_argument(\"--headless\")\n\n driver = webdriver.Chrome(options=options)\n driver.get(captchaUrl) \n \n # Espera hasta que el elemento con el ID 'success-text' sea visible\n try:\n success_text = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"success-text\"))\n )\n cookies = driver.get_cookie_string()\n driver.quit()\n cookies = json.loads(cookies) if cookies else {}\n print(\"Elemento encontrado y cookies obtenidas.\")\n return cookies\n except:\n driver.quit()\n print(\"Elemento no encontrado después de 10 segundos.\")\n return {}\n \n def solveCaptchaFirefox(self, captchaUrl, other):\n from selenium import webdriver\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import WebDriverWait\n from selenium.webdriver.support import expected_conditions as EC\n import json, time\n\n options = webdriver.FirefoxOptions()\n\n options.add_argument('--no-first-run --no-service-autorun --password-store=basic')\n options.set_preference(\"window-size\", \"1920,1080\")\n\n options.set_preference(\"dom.webdriver.enabled\", False)\n options.set_preference(\"dom.webnotifications.enabled\", False)\n\n profile = webdriver.FirefoxProfile()\n profile.set_preference(\"dom.webdriver.enabled\", False)\n profile.set_preference(\"dom.webnotifications.enabled\", False)\n\n service = webdriver.FirefoxService(firefox_profile = profile)\n\n driver = webdriver.Firefox(\n service= service,\n options = options\n )\n driver.execute_script(\"Object.defineProperty(navigator, 'webdriver', {get: () => undefined})\")\n\n #options.add_argument(\"--headless\")\n \n time.sleep(2)\n driver.get(captchaUrl) \n\n # wait for 'success-text'\n try:\n success_text = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"success-text\"))\n )\n cookies = driver.get_cookie_string()\n driver.quit()\n cookies = json.loads(cookies) if cookies else {}\n print(\"found element, coockes obtained.\")\n return cookies\n except:\n screenshot = driver.get_screenshot_as_png()\n with open('screenshot.png', 'wb') as f:\n f.write(screenshot)\n driver.quit()\n print(\"element not found :'(\")\n return {}\n \n def solveCaptcha2(self, captchaUrl, mainUrl):\n # call to solveCaptchaAsync\n #return asyncio.run(self.solveCaptchaAsync(captchaUrl, mainUrl))\n self.solveCaptchaSelenium(captchaUrl, mainUrl)\n \n def solveCaptchaSelenium(self, captchaUrl, mainUrl):\n from undetected_chromedriver import Chrome, ChromeOptions\n from selenium import webdriver\n from 
selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import WebDriverWait\n from selenium.webdriver.support import expected_conditions as EC\n import json\n import time\n\n options = ChromeOptions()\n options.add_argument(\"--blink-settings=imagesEnabled=false\")\n options.add_argument(\"--disable-blink-features=AutomationControlled\")\n options.add_argument(\"--disable-infobars\")\n options.add_argument(\"--disable-notifications\")\n options.add_argument(\"--headless=new\")\n #options.add_argument(\"--start-maximized\")\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-dev-shm-usage\")\n #options.add_argument(\"--disable-renderer-backgrounding\")\n #options.add_argument(\"--disable-background-timer-throttling\")\n #options.add_argument(\"--disable-backgrounding-occluded-windows\")\n options.add_argument(\"--disable-client-side-phishing-detection\")\n options.add_argument(\"--disable-crash-reporter\")\n options.add_argument(\"--disable-oopr-debug-crash-dump\")\n options.add_argument(\"--no-crash-upload\")\n #options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-extensions\")\n #options.add_argument(\"--disable-low-res-tiling\")\n #options.add_argument(\"--log-level=3\")\n #options.add_argument(\"--silent\")\n \n options.add_argument('--no-first-run --no-service-autorun --password-store=basic')\n #options.add_argument(\"user-agent=\"+self.USER_AGENT_EDGE)\n driver = Chrome(use_subprocess=False, options=options)\n driver.execute_script(\"Object.defineProperty(navigator, 'webdriver', {get: () => undefined})\")\n driver.execute_cdp_cmd('Network.setUserAgentOverride', {\"userAgent\": self.USER_AGENT_EDGE})\n print(driver.execute_script(\"return navigator.userAgent;\"))\n \n time.sleep(4)\n driver.get(mainUrl) \n # wait 4 seconds\n time.sleep(4)\n\n # Open captchaUrl in a new tab\n driver.execute_script(\"window.location.href = '{}';\".format(captchaUrl))\n \n try:\n ## error is caused by:\n ## https://challenges.cloudflare.com/cdn-cgi/challenge-platform/h/b/flow/ov1/39531545:1701774410:PuGLG-Yh-DxTHbLa2ykrQt6-LUcl6TTFFDl_iPlFLiM/830c05699a752fab/bfb90721bb496c6 400 code\n success_text = WebDriverWait(driver, 40).until(\n EC.presence_of_element_located((By.ID, \"success-text\"))\n )\n cookies = driver.get_cookie_string()\n driver.quit()\n cookies = json.loads(cookies) if cookies else {}\n print(\"found element, coockes obtained.\")\n return cookies\n except:\n\n driver.execute_script(\"window.location.href = '{}';\".format(captchaUrl))\n\n try:\n success_text = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"success-text\"))\n )\n cookies = driver.get_cookie_string()\n driver.quit()\n cookies = json.loads(cookies) if cookies else {}\n print(\"element found, coockes obtained.\")\n return cookies\n except:\n # store screenshot\n screenshot = driver.get_screenshot_as_png()\n with open('screenshot.png', 'wb') as f:\n f.write(screenshot)\n driver.quit()\n print(\"Element not found after 10 seconds.\")\n return {}\n \n def launch_firefox(self, url, timeout = 10):\n process = subprocess.Popen(['/usr/bin/firefox', '-url', url], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if timeout:\n time.sleep(timeout)\n process.kill()\n\n \n def get_cookies_from_internal_storage(self, ff_cookies):\n con = sqlite3.connect(ff_cookies)\n cur = con.cursor()\n cur.execute(\"SELECT host, path, isSecure, expiry, name, value FROM moz_cookies where host like '%.bing.com%'\")\n cookie_string = \"\"\n for item in 
cur.fetchall():\n #c = http.cookiejar.Cookie(0, item[4], item[5], None, False, item[0], item[0].startswith('.'), item[0].startswith('.'), item[1], False, item[2], item[3], item[3] == \"\", None, None, {})\n # get the cookie string to put in requests.get sentence \n cookie_string += f\"{item[4]}={item[5]}; \"\n \n return cookie_string\n\n def extractCookiesFromRealFirefox(self, url):\n \n self.launch_firefox(url, 5)\n\n return self.extractFirefoxCookies()\n\n def extractFirefoxCookies(self):\n firefox_cookies = \"/tmp/cookies.sqlite\"\n\n # get the system content of the file ~/.mozilla/firefox/profiles.ini\n response = os.popen(\"cat ~/.mozilla/firefox/profiles.ini\").read()\n #print(response)\n # extract with regex all lines starting with Path= and without the \"Path=\" string\n regex = \"(?<=Path=)(.*)\"\n firefox_cookies_file = re.findall(regex, response)\n #print(\"checking for firefox cookies in:\")\n cookies = \"\"\n # copy the firefox_cookies_files to /tmp if isset in system\n for file in firefox_cookies_file:\n next_file = expanduser(f\"~/.mozilla/firefox/{file}/cookies.sqlite\")\n if(os.path.isfile(next_file)):\n os.system(f\"cp {next_file} /tmp\")\n cookies = self.get_cookies_from_internal_storage(firefox_cookies)\n #print(f\"Firefox cookies from {next_file} copied to {firefox_cookies}\")\n # remove temp file\n os.system(f\"rm {firefox_cookies}\")\n else:\n print(f\"File {next_file} not found.\")\n return cookies" }, { "identifier": "BingResponse", "path": "core/helpers/binghelper.py", "snippet": "class BingResponse:\n def __init__(self, response):\n if \"type\" in response: \n if response.get('type') == 1:\n self.chatmessage = BingMessageType1(\n response.get('type'),\n response.get('target'),\n response.get('arguments')\n )\n elif response.get('type') == 2:\n self.chatmessage = BingMessageType2(\n response.get('type'),\n response.get('invocationId'),\n response.get('item')\n )" }, { "identifier": "BingMessageType", "path": "core/helpers/binghelper.py", "snippet": "class BingMessageType:\n def __init__(self,type,invocationId):\n self.type = type\n self.invocationId = invocationId" }, { "identifier": "BingMessageType1", "path": "core/helpers/binghelper.py", "snippet": "class BingMessageType1:\n def __init__(self,type,target,arguments):\n self.type = type\n self.target = target\n self.arguments = ChatArguments(arguments)" }, { "identifier": "BingMessageType2", "path": "core/helpers/binghelper.py", "snippet": "class BingMessageType2(BingMessageType):\n def __init__(self,type,invocationId,item):\n super().__init__(type,invocationId)\n self.item = BingItem(item.get('messages'),item.get('firstNewMessageIndex'),item.get('defaultChatName'),item.get('conversationId'),item.get('requestId'),item.get('conversationExpiryTime'),item.get('shouldInitiateConversation'),item.get('telemetry'),item.get('throttling'),item.get('result'))" }, { "identifier": "BingTextResponse", "path": "core/helpers/binghelper.py", "snippet": "class BingTextResponse:\n def __init__(self, text = None, offset = None, duration = None, recognitionStatus = None, displayText = None, primaryLanguage = None):\n self.text = text\n self.offset = offset\n self.duration = duration\n self.recognitionStatus = recognitionStatus\n self.displayText = displayText\n if primaryLanguage is not None:\n self.primaryLanguage = BingPrimaryLanguage(primaryLanguage.get('Language'),primaryLanguage.get('Confidence'))" } ]
from core.browser import Browser from aiohttp import ClientSession from dateutil.tz import tzutc from core.helpers.binghelper import BingResponse, BingMessageType, BingMessageType1, BingMessageType2, BingTextResponse import json import uuid import os import re import asyncio import aiohttp import string import random import requests import urllib import urllib.parse import datetime import threading import queue import speech_recognition as sr import traceback import time
3,066
class AudioRecorder(threading.Thread): def __init__(self, sample_rate=22500): threading.Thread.__init__(self) self.queue = queue.Queue() self.exit = False self.recognizer = sr.Recognizer() self.mic = sr.Microphone(sample_rate=sample_rate) def getQueue(self): return self.queue def getExit(self): return self.exit def setExit(self, exit): self.exit = exit def run(self): with self.mic as source: while not self.exit: audio = self.recognizer.record(source, duration=1) self.queue.put(audio.frame_data)
class AudioRecorder(threading.Thread): def __init__(self, sample_rate=22500): threading.Thread.__init__(self) self.queue = queue.Queue() self.exit = False self.recognizer = sr.Recognizer() self.mic = sr.Microphone(sample_rate=sample_rate) def getQueue(self): return self.queue def getExit(self): return self.exit def setExit(self, exit): self.exit = exit def run(self): with self.mic as source: while not self.exit: audio = self.recognizer.record(source, duration=1) self.queue.put(audio.frame_data)
class Bing(Browser):
0
2023-12-28 19:45:49+00:00
4k
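AudioRecorder in this record is a producer thread that records one-second microphone chunks and pushes them onto a queue.Queue; the consuming side falls outside the crop. A microphone-free sketch of the same producer/consumer shape, with placeholder byte strings standing in for audio.frame_data:

import queue
import threading
import time

class Producer(threading.Thread):
    def __init__(self) -> None:
        super().__init__()
        self.queue: "queue.Queue[bytes]" = queue.Queue()
        self.exit = False

    def run(self) -> None:
        while not self.exit:          # cooperative shutdown flag, as in setExit()
            self.queue.put(b"chunk")  # placeholder for audio.frame_data
            time.sleep(0.1)

producer = Producer()
producer.start()
for _ in range(3):
    print(producer.queue.get())       # get() blocks until a chunk arrives
producer.exit = True
producer.join()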
vita-epfl/social-transmotion
evaluate_jrdb.py
[ { "identifier": "batch_process_coords", "path": "dataset_jrdb.py", "snippet": "def batch_process_coords(coords, masks, padding_mask, config, modality_selection='traj+2dbox', training=False, multiperson=True):\n joints = coords.to(config[\"DEVICE\"])\n masks = masks.to(config[\"DEVICE\"])\n in_F = config[\"TRAIN\"][\"input_track_size\"]\n \n in_joints_pelvis = joints[:,:, (in_F-1):in_F, 0:1, :].clone()\n in_joints_pelvis_last = joints[:,:, (in_F-2):(in_F-1), 0:1, :].clone()\n\n joints[:,:,:,0] = joints[:,:,:,0] - joints[:,0:1, (in_F-1):in_F, 0]\n joints[:,:,:,1:] = (joints[:,:,:,1:] - joints[:,:,(in_F-1):in_F,1:])*0.25 #rescale for BB\n\n B, N, F, J, K = joints.shape\n if not training:\n if modality_selection=='traj':\n joints[:,:,:,1:]=0\n elif modality_selection=='traj+2dbox':\n pass\n else:\n print('modality error')\n exit()\n else:\n # augment JRDB traj\n joints[:,:,:,0,:3] = getRandomRotatePoseTransform(config)(joints[:,:,:,0,:3])\n joints = joints.transpose(1, 2).reshape(B, F, N*J, K)\n in_joints_pelvis = in_joints_pelvis.reshape(B, 1, N, K)\n in_joints_pelvis_last = in_joints_pelvis_last.reshape(B, 1, N, K)\n masks = masks.transpose(1, 2).reshape(B, F, N*J)\n\n in_F, out_F = config[\"TRAIN\"][\"input_track_size\"], config[\"TRAIN\"][\"output_track_size\"] \n in_joints = joints[:,:in_F].float()\n out_joints = joints[:,in_F:in_F+out_F].float()\n in_masks = masks[:,:in_F].float()\n out_masks = masks[:,in_F:in_F+out_F].float()\n\n \n return in_joints, in_masks, out_joints, out_masks, padding_mask.float()" }, { "identifier": "create_dataset", "path": "dataset_jrdb.py", "snippet": "def create_dataset(dataset_name, logger, **args):\n logger.info(\"Loading dataset \" + dataset_name)\n\n if dataset_name == 'jta_all_visual_cues':\n dataset = JtaAllVisualCuesDataset(**args)\n elif dataset_name == 'jrdb_2dbox':\n dataset = Jrdb2dboxDataset(**args)\n else:\n raise ValueError(f\"Dataset with name '{dataset_name}' not found.\")\n \n return dataset" }, { "identifier": "collate_batch", "path": "dataset_jrdb.py", "snippet": "def collate_batch(batch):\n joints_list = []\n masks_list = []\n num_people_list = []\n for joints, masks in batch:\n \n joints_list.append(joints)\n masks_list.append(masks)\n num_people_list.append(torch.zeros(joints.shape[0]))\n \n joints = pad_sequence(joints_list, batch_first=True)\n masks = pad_sequence(masks_list, batch_first=True)\n padding_mask = pad_sequence(num_people_list, batch_first=True, padding_value=1).bool()\n\n return joints, masks, padding_mask" }, { "identifier": "create_model", "path": "model_jrdb.py", "snippet": "def create_model(config, logger):\n seq_len = config[\"MODEL\"][\"seq_len\"]\n token_num = config[\"MODEL\"][\"token_num\"]\n nhid=config[\"MODEL\"][\"dim_hidden\"]\n nhead=config[\"MODEL\"][\"num_heads\"]\n nlayers_local=config[\"MODEL\"][\"num_layers_local\"]\n nlayers_global=config[\"MODEL\"][\"num_layers_global\"]\n dim_feedforward=config[\"MODEL\"][\"dim_feedforward\"]\n\n if config[\"MODEL\"][\"type\"] == \"transmotion\":\n logger.info(\"Creating bert model.\")\n model = TransMotion(tok_dim=seq_len,\n nhid=nhid,\n nhead=nhead,\n dim_feedfwd=dim_feedforward,\n nlayers_local=nlayers_local,\n nlayers_global=nlayers_global,\n output_scale=config[\"MODEL\"][\"output_scale\"],\n obs_and_pred=config[\"TRAIN\"][\"input_track_size\"] + config[\"TRAIN\"][\"output_track_size\"],\n num_tokens=token_num,\n device=config[\"DEVICE\"]\n ).to(config[\"DEVICE\"]).float()\n else:\n raise ValueError(f\"Model type '{config['MODEL']['type']}' not found\")\n\n 
return model" }, { "identifier": "create_logger", "path": "utils/utils.py", "snippet": "def create_logger(logdir):\n head = '%(asctime)-15s %(message)s'\n if logdir != '':\n log_file = os.path.join(logdir, 'log.txt')\n logging.basicConfig(filename=log_file, format=head)\n # output to console as well\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n else:\n logging.basicConfig(format=head)\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n return logger" } ]
import argparse import torch import random import numpy as np from progress.bar import Bar from torch.utils.data import DataLoader from dataset_jrdb import batch_process_coords, create_dataset, collate_batch from model_jrdb import create_model from utils.utils import create_logger
2,246
def inference(model, config, input_joints, padding_mask, out_len=14): model.eval() with torch.no_grad(): pred_joints = model(input_joints, padding_mask) output_joints = pred_joints[:,-out_len:] return output_joints def evaluate_ade_fde(model, modality_selection, dataloader, bs, config, logger, return_all=False, bar_prefix="", per_joint=False, show_avg=False): in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size'] bar = Bar(f"EVAL ADE_FDE", fill="#", max=len(dataloader)) batch_size = bs batch_id = 0 ade = 0 fde = 0 ade_batch = 0 fde_batch = 0 for i, batch in enumerate(dataloader): joints, masks, padding_mask = batch padding_mask = padding_mask.to(config["DEVICE"]) in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, modality_selection) pred_joints = inference(model, config, in_joints, padding_mask, out_len=out_F) out_joints = out_joints.cpu() pred_joints = pred_joints.cpu().reshape(out_joints.size(0), 12, 1, 2) for k in range(len(out_joints)): person_out_joints = out_joints[k,:,0:1] person_pred_joints = pred_joints[k,:,0:1] gt_xy = person_out_joints[:,0,:2] pred_xy = person_pred_joints[:,0,:2] sum_ade = 0 for t in range(12): d1 = (gt_xy[t,0].detach().cpu().numpy() - pred_xy[t,0].detach().cpu().numpy()) d2 = (gt_xy[t,1].detach().cpu().numpy() - pred_xy[t,1].detach().cpu().numpy()) dist_ade = [d1,d2] sum_ade += np.linalg.norm(dist_ade) sum_ade /= 12 ade_batch += sum_ade d3 = (gt_xy[-1,0].detach().cpu().numpy() - pred_xy[-1,0].detach().cpu().numpy()) d4 = (gt_xy[-1,1].detach().cpu().numpy() - pred_xy[-1,1].detach().cpu().numpy()) dist_fde = [d3,d4] scene_fde = np.linalg.norm(dist_fde) fde_batch += scene_fde batch_id+=1 ade = ade_batch/((batch_id-1)*batch_size+len(out_joints)) fde = fde_batch/((batch_id-1)*batch_size+len(out_joints)) return ade, fde if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--ckpt", type=str, help="checkpoint path") parser.add_argument("--split", type=str, default="test", help="Split to use. one of [train, test, valid]") parser.add_argument("--metric", type=str, default="vim", help="Evaluation metric. One of (vim, mpjpe)") parser.add_argument("--modality", type=str, default="traj+2dbox", help="available modality combination from['traj','traj+2dbox']") args = parser.parse_args() random.seed(0) np.random.seed(0) torch.manual_seed(0) ################################ # Load checkpoint ################################ logger = create_logger('') logger.info(f'Loading checkpoint from {args.ckpt}') ckpt = torch.load(args.ckpt, map_location = torch.device('cpu')) config = ckpt['config'] if torch.cuda.is_available(): config["DEVICE"] = f"cuda:{torch.cuda.current_device()}" torch.cuda.manual_seed(0) else: config["DEVICE"] = "cpu" logger.info("Initializing with config:") logger.info(config) ################################ # Initialize model ################################ model = create_model(config, logger) model.load_state_dict(ckpt['model']) ################################ # Load data ################################ in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size'] assert in_F == 9 assert out_F == 12 name = config['DATA']['train_datasets']
def inference(model, config, input_joints, padding_mask, out_len=14): model.eval() with torch.no_grad(): pred_joints = model(input_joints, padding_mask) output_joints = pred_joints[:,-out_len:] return output_joints def evaluate_ade_fde(model, modality_selection, dataloader, bs, config, logger, return_all=False, bar_prefix="", per_joint=False, show_avg=False): in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size'] bar = Bar(f"EVAL ADE_FDE", fill="#", max=len(dataloader)) batch_size = bs batch_id = 0 ade = 0 fde = 0 ade_batch = 0 fde_batch = 0 for i, batch in enumerate(dataloader): joints, masks, padding_mask = batch padding_mask = padding_mask.to(config["DEVICE"]) in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, modality_selection) pred_joints = inference(model, config, in_joints, padding_mask, out_len=out_F) out_joints = out_joints.cpu() pred_joints = pred_joints.cpu().reshape(out_joints.size(0), 12, 1, 2) for k in range(len(out_joints)): person_out_joints = out_joints[k,:,0:1] person_pred_joints = pred_joints[k,:,0:1] gt_xy = person_out_joints[:,0,:2] pred_xy = person_pred_joints[:,0,:2] sum_ade = 0 for t in range(12): d1 = (gt_xy[t,0].detach().cpu().numpy() - pred_xy[t,0].detach().cpu().numpy()) d2 = (gt_xy[t,1].detach().cpu().numpy() - pred_xy[t,1].detach().cpu().numpy()) dist_ade = [d1,d2] sum_ade += np.linalg.norm(dist_ade) sum_ade /= 12 ade_batch += sum_ade d3 = (gt_xy[-1,0].detach().cpu().numpy() - pred_xy[-1,0].detach().cpu().numpy()) d4 = (gt_xy[-1,1].detach().cpu().numpy() - pred_xy[-1,1].detach().cpu().numpy()) dist_fde = [d3,d4] scene_fde = np.linalg.norm(dist_fde) fde_batch += scene_fde batch_id+=1 ade = ade_batch/((batch_id-1)*batch_size+len(out_joints)) fde = fde_batch/((batch_id-1)*batch_size+len(out_joints)) return ade, fde if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--ckpt", type=str, help="checkpoint path") parser.add_argument("--split", type=str, default="test", help="Split to use. one of [train, test, valid]") parser.add_argument("--metric", type=str, default="vim", help="Evaluation metric. One of (vim, mpjpe)") parser.add_argument("--modality", type=str, default="traj+2dbox", help="available modality combination from['traj','traj+2dbox']") args = parser.parse_args() random.seed(0) np.random.seed(0) torch.manual_seed(0) ################################ # Load checkpoint ################################ logger = create_logger('') logger.info(f'Loading checkpoint from {args.ckpt}') ckpt = torch.load(args.ckpt, map_location = torch.device('cpu')) config = ckpt['config'] if torch.cuda.is_available(): config["DEVICE"] = f"cuda:{torch.cuda.current_device()}" torch.cuda.manual_seed(0) else: config["DEVICE"] = "cpu" logger.info("Initializing with config:") logger.info(config) ################################ # Initialize model ################################ model = create_model(config, logger) model.load_state_dict(ckpt['model']) ################################ # Load data ################################ in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size'] assert in_F == 9 assert out_F == 12 name = config['DATA']['train_datasets']
dataset = create_dataset(name[0], logger, split=args.split, track_size=(in_F+out_F), track_cutoff=in_F)
1
2023-12-25 15:12:40+00:00
4k
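evaluate_ade_fde in this record accumulates per-timestep L2 displacement errors (ADE) and the final-step error (FDE) over 12 predicted frames with explicit Python loops. The same two quantities on a toy [T, 2] trajectory, vectorized with numpy; the synthetic track below is purely illustrative:

import numpy as np

T = 12
gt = np.cumsum(np.full((T, 2), 0.1), axis=0)                    # toy ground-truth xy track
pred = gt + np.random.default_rng(0).normal(0.0, 0.05, (T, 2))  # noisy "prediction"

dists = np.linalg.norm(pred - gt, axis=1)   # per-timestep displacement
ade = dists.mean()                          # average displacement error
fde = dists[-1]                             # final displacement error
print(f"ADE={ade:.4f} FDE={fde:.4f}")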
AzizKpln/AutoIOC-MISP
main.py
[ { "identifier": "runAbuseIP", "path": "Integrations/abuseipdb.py", "snippet": "def runAbuseIP(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n url = 'https://api.abuseipdb.com/api/v2/blacklist'\r\n querystring = {\r\n 'confidenceMinimum':'85'\r\n }\r\n headers = {\r\n 'Accept': 'application/json',\r\n 'Key': '438fad635ef39a0a143ffe7ab3f77ecba1f1cae0ef974c6ce16bd5ae6199b104fb780d282f2b1e9e'\r\n }\r\n print(\"[+] Geting IOC List From AbuseipDB. Please wait!\")\r\n response = requests.request(method='GET', url=url, headers=headers, params=querystring)\r\n decodedResponse = json.loads(response.text)\r\n def extract_ip_addresses(data):\r\n try:\r\n json_data = json.loads(data)\r\n ip_addresses = [entry[\"ipAddress\"] for entry in json_data.get(\"data\", [])]\r\n return ip_addresses\r\n except json.JSONDecodeError as e:\r\n return f\"Error decoding JSON: {e}\"\r\n ip_addresses = extract_ip_addresses(json.dumps(decodedResponse, sort_keys=True, indent=4))\r\n for i in ip_addresses:\r\n upload_attr(i)\r" }, { "identifier": "runCinsScore", "path": "Integrations/cinsscore.py", "snippet": "def runCinsScore(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://cinsscore.com/list/ci-badguys.txt\").text\r\n ipAddr=req.split(\"\\n\")\r\n for i in ipAddr:\r\n upload_attr(i)\r" }, { "identifier": "runKillnet", "path": "Integrations/killnet.py", "snippet": "def runKillnet(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n \r\n req=requests.get(\"https://raw.githubusercontent.com/securityscorecard/SSC-Threat-Intel-IoCs/master/KillNet-DDoS-Blocklist/ipblocklist.txt\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r" }, { "identifier": "runEmergingThreats", "path": "Integrations/emergingthreats.py", "snippet": "def runEmergingThreats(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n print(\"[+] Getting IOC List From Emergingthreats. 
Please wait!\")\r\n req=requests.get(\"https://rules.emergingthreats.net/blockrules/compromised-ips.txt\").text\r\n ipAddr=req.split(\"\\n\")\r\n for i in ipAddr:\r\n upload_attr(i)\r" }, { "identifier": "runHoneyDB", "path": "Integrations/honeydb.py", "snippet": "def runHoneyDB(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n url = 'https://honeydb.io/api/bad-hosts'\r\n headers = {\r\n 'Accept': 'application/json',\r\n 'X-HoneyDb-ApiId':'93a1d7d4f552a782cf120958f99f86abad8397985161dd78892b3348a467aa91',\r\n 'X-HoneyDb-ApiKey': '022d5de38750eb4bfea60f6dbd49de839e08bfc12aeae3b0e1de2de935ff2b59'\r\n }\r\n response = requests.request(method='GET', url=url, headers=headers)\r\n decodedResponse = json.loads(response.text)\r\n for i in decodedResponse:\r\n if str(i[\"last_seen\"])==str(current_date):\r\n upload_attr(str(i[\"remote_host\"]))\r" }, { "identifier": "runMaltiverse", "path": "Integrations/maltiverse.py", "snippet": "def runMaltiverse(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n api_key = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjIzMTkzNTg5MzcsImlhdCI6MTY4ODYzODkzNywic3ViIjoxNjA5NSwidXNlcm5hbWUiOiJheml6a2FwbGFuMTkwNyIsImFkbWluIjpmYWxzZSwidGVhbV9pZCI6bnVsbCwidGVhbV9uYW1lIjpudWxsLCJ0ZWFtX2xlYWRlciI6ZmFsc2UsInRlYW1fcmVzZWFyY2hlciI6ZmFsc2UsInRlYW1faW5kZXgiOm51bGwsImFwaV9saW1pdCI6MTAwfQ.JLvydZA3dd-fKO0TZQzHlU0ckoBDfpVQGEk_S-AeWWM'\r\n url = 'https://api.maltiverse.com/collection/WZ0XJHIB8jmkCY9eLpr0/download?filetype=sha256'\r\n headers = { 'Authorization':'Bearer ' + api_key }\r\n response = requests.get(url, headers=headers)\r\n re=response.text\r\n r=re.split(\"\\n\")\r\n for i in r:\r\n upload_attr(i)\r" }, { "identifier": "runMalwareBazaar", "path": "Integrations/malware_bazar.py", "snippet": "def runMalwareBazaar(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n zip_url = \"https://bazaar.abuse.ch/export/txt/sha256/full/\"\r\n destination_folder = \"Integrations/\"\r\n if not os.path.exists(destination_folder):\r\n os.makedirs(destination_folder)\r\n download_and_extract(zip_url, destination_folder)\r" }, { "identifier": "runOpenPhish", "path": "Integrations/openphish.py", "snippet": "def runOpenPhish(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://openphish.com/feed.txt\").text\r\n req_=req.split(\"\\n\")\r\n for i in req_:\r\n upload_attr(i)\r" }, { "identifier": "runPhishHunt", "path": "Integrations/phishunt.py", "snippet": "def runPhishHunt(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://phishunt.io/feed.txt\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r" }, { "identifier": "runRescureMe", "path": "Integrations/rescureme.py", "snippet": "def runRescureMe(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://rescure.me/rescure_blacklist.txt\").text\r\n lines = req.split('\\n')\r\n ip_addresses = [line.strip() for line in lines if line and line[0].isdigit()]\r\n for ip_address in ip_addresses:\r\n upload_attr(ip_address)\r\n req1=requests.get(\"https://rescure.me/rescure_malware_hashes.txt\").text\r\n lines = req1.split('\\n')\r\n hash_values = [line.strip() for line in lines if line and len(line) == 40]\r\n for hash_value in hash_values:\r\n upload_attr(hash_value)\r\n\r\n req1=requests.get(\"https://rescure.me/rescure_domain_blacklist.txt\").text\r\n lines = req1.split(' # # # # 
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #')\r\n lines=lines[2].split(\"\\n\")\r\n for i in lines:\r\n if i==\"\":\r\n pass\r\n else:\r\n upload_attr(i.strip())\r" }, { "identifier": "runSSLbl", "path": "Integrations/sslbl.py", "snippet": "def runSSLbl(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://sslbl.abuse.ch/blacklist/sslipblacklist.txt\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r\n req1=requests.get(\"https://sslbl.abuse.ch/blacklist/sslblacklist.csv\").text\r\n lines = req1.split('\\n')\r\n hash_values = [line.split(',')[1] for line in lines if line and not line.startswith(\"#\")]\r\n for hash_value in hash_values:\r\n upload_attr(hash_value)\r" }, { "identifier": "runThreatFox", "path": "Integrations/threatfox.py", "snippet": "def runThreatFox(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n zip_url = [\"https://threatfox.abuse.ch/export/json/sha256/full/\",\"https://threatfox.abuse.ch/export/json/ip-port/full/\",\"https://threatfox.abuse.ch/export/json/domains/full/\"]\r\n destination_folder = \"Integrations/\"\r\n \r\n if not os.path.exists(destination_folder):\r\n os.makedirs(destination_folder)\r\n for url in zip_url:\r\n download_and_extract(url, destination_folder)\r\n read_content()\r\n for r in list([\"Integrations/full_sha256.json\",\"Integrations/full_ip-port.json\",\"Integrations/full_domains.json\"]):\r\n delete_file(str(r))\r" }, { "identifier": "runURLHaus", "path": "Integrations/urlhaus.py", "snippet": "def runURLHaus(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n url = 'https://urlhaus.abuse.ch/downloads/csv_recent/'\r\n response = requests.get(url)\r\n if response.status_code == 200:\r\n csv_data = response.text\r\n csv_file = StringIO(csv_data)\r\n csv_reader = csv.reader(csv_file)\r\n header = next(csv_reader)\r\n for row in csv_reader:\r\n if len(row) >= 9:\r\n upload_attr(row[2])\r\n else:\r\n print(\"Error: Row does not have the expected number of columns\")\r\n\r\n else:\r\n print(f\"Error fetching data. 
Status code: {response.status_code}\")\r" }, { "identifier": "runVirusShare", "path": "Integrations/virusshare.py", "snippet": "def runVirusShare(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n f=requests.get(\"https://virusshare.com/hashes\")\r\n res=re.findall(\"hashfiles/VirusShare_\\d+\",f.text)\r\n fileName=res[-1].split(\"/\")[1]\r\n malware_md5=list()\r\n url1=f\"https://virusshare.com/hashfiles/{fileName}.md5\"\r\n local_filename = url1.split('/')[-1]\r\n with requests.get(url1, stream=True) as r:\r\n with open(\"Integrations/\"+local_filename, 'wb') as f:\r\n shutil.copyfileobj(r.raw, f)\r\n with open(\"Integrations/\"+local_filename, 'r') as md5_file:\r\n lines = md5_file.readlines()\r\n for line in lines:\r\n if 'http://VirusShare.com' in line or 'Twitter: @VXShare' in line or \"################################\" in line or \"# Malware sample MD5 list for #\" in line or \"# VirusShare_00484.zip #\" in line:\r\n malware_md5.append(line)\r\n with open(\"Integrations/\"+local_filename, 'w') as md5_file:\r\n for i in malware_md5:\r\n md5_file.write(i)\r\n for line in lines:\r\n if line not in malware_md5:\r\n upload_attr(line)\r" }, { "identifier": "runVXVault", "path": "Integrations/vxvault.py", "snippet": "def runVXVault(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"http://vxvault.net/URL_List.php\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r" }, { "identifier": "runManually", "path": "Integrations/manual.py", "snippet": "def runManually(mispapi,mispurl,mispeventid,ioclist):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n ioclist_=list(ioclist)\r\n iocs=ioclist_[0].split(\"\\r\")\r\n for i in iocs:\r\n upload_attr(i.strip())" } ]
from flask import Flask, render_template, redirect, request from Integrations.abuseipdb import runAbuseIP from Integrations.cinsscore import runCinsScore from Integrations.killnet import runKillnet from Integrations.emergingthreats import runEmergingThreats from Integrations.honeydb import runHoneyDB from Integrations.maltiverse import runMaltiverse from Integrations.malware_bazar import runMalwareBazaar from Integrations.openphish import runOpenPhish from Integrations.phishunt import runPhishHunt from Integrations.rescureme import runRescureMe from Integrations.sslbl import runSSLbl from Integrations.threatfox import runThreatFox from Integrations.urlhaus import runURLHaus from Integrations.virusshare import runVirusShare from Integrations.vxvault import runVXVault from Integrations.manual import runManually import threading
3,234
app = Flask(__name__) @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'POST': operation = request.form['operation'] if operation=="add_manually": return redirect("/manually") else: return redirect('/automaticlly') return render_template('main.html') @app.route("/manually",methods=["GET","POST"]) def manually(): if request.method=="POST": ioclist=request.form.getlist("iocList") mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"]
app = Flask(__name__) @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'POST': operation = request.form['operation'] if operation=="add_manually": return redirect("/manually") else: return redirect('/automaticlly') return render_template('main.html') @app.route("/manually",methods=["GET","POST"]) def manually(): if request.method=="POST": ioclist=request.form.getlist("iocList") mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"]
threading.Thread(target=runManually,args=(mispapi,mispurl,mispeventid,ioclist,)).start()
15
2023-12-23 10:39:28+00:00
4k
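Nearly every integration in this record follows the same pattern: GET a newline-separated plain-text feed and push each entry to MISP via upload_attr. A sketch of that fetch-and-iterate loop with basic hygiene (skipping blanks, feed comments, and duplicates); the feed URL is one used in the snippets, fetching it needs live network access, and upload below is a hypothetical stand-in for the record's upload_attr:

import requests

def fetch_iocs(url: str) -> list:
    text = requests.get(url, timeout=30).text
    seen = set()
    iocs = []
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith("#") or line in seen:
            continue  # drop blanks, feed comments, and repeats
        seen.add(line)
        iocs.append(line)
    return iocs

def upload(ioc: str) -> None:
    # hypothetical stand-in for upload_attr(); here we only print
    print("would upload:", ioc)

for ioc in fetch_iocs("https://cinsscore.com/list/ci-badguys.txt")[:5]:
    upload(ioc)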
facebookresearch/ca_body
ca_body/utils/geom_body.py
[ { "identifier": "index_image_impaint", "path": "ca_body/utils/geom.py", "snippet": "def index_image_impaint(\n index_image: th.Tensor,\n bary_image: Optional[th.Tensor] = None,\n distance_threshold=100.0,\n):\n # getting the mask around the indexes?\n if len(index_image.shape) == 3:\n valid_index = (index_image != -1).any(dim=-1)\n elif len(index_image.shape) == 2:\n valid_index = index_image != -1\n else:\n raise ValueError(\"`index_image` should be a [H,W] or [H,W,C] image\")\n\n invalid_index = ~valid_index\n\n device = index_image.device\n\n valid_ij = th.stack(th.where(valid_index), dim=-1)\n invalid_ij = th.stack(th.where(invalid_index), dim=-1)\n lookup_valid = KDTree(valid_ij.cpu().numpy())\n\n dists, idxs = lookup_valid.query(invalid_ij.cpu())\n\n # TODO: try average?\n idxs = th.as_tensor(idxs, device=device)[..., 0]\n dists = th.as_tensor(dists, device=device)[..., 0]\n\n dist_mask = dists < distance_threshold\n\n invalid_border = th.zeros_like(invalid_index)\n invalid_border[invalid_index] = dist_mask\n\n invalid_src_ij = valid_ij[idxs][dist_mask]\n invalid_dst_ij = invalid_ij[dist_mask]\n\n index_image_imp = index_image.clone()\n\n index_image_imp[invalid_dst_ij[:, 0], invalid_dst_ij[:, 1]] = index_image[\n invalid_src_ij[:, 0], invalid_src_ij[:, 1]\n ]\n\n if bary_image is not None:\n bary_image_imp = bary_image.clone()\n\n bary_image_imp[invalid_dst_ij[:, 0], invalid_dst_ij[:, 1]] = bary_image[\n invalid_src_ij[:, 0], invalid_src_ij[:, 1]\n ]\n\n return index_image_imp, bary_image_imp\n return index_image_imp" }, { "identifier": "make_uv_barys", "path": "ca_body/utils/geom.py", "snippet": "def make_uv_barys(\n vt: th.Tensor,\n vti: th.Tensor,\n uv_shape: Union[Tuple[int, int], int],\n flip_uv: bool = True,\n):\n \"\"\"Compute a UV-space barycentric map where each texel contains barycentric\n coordinates for that texel within its enclosing UV triangle. For texels\n with no assigned triangle, all 3 barycentric coordinates will be 0.\n \"\"\"\n if isinstance(uv_shape, int):\n uv_shape = (uv_shape, uv_shape)\n\n if flip_uv:\n # Flip here because texture coordinates in some of our topo files are\n # stored in OpenGL convention with Y=0 on the bottom of the texture\n # unlike numpy/torch arrays/tensors.\n vt = vt.clone()\n vt[:, 1] = 1 - vt[:, 1]\n\n face_index_map = make_uv_face_index(vt, vti, uv_shape, flip_uv=False).to(vt.device)\n vti_map = vti.long()[face_index_map.clamp(min=0)]\n uv_tri_uvs = vt[vti_map].permute(2, 0, 1, 3)\n\n uv_grid = th.meshgrid(\n th.linspace(0.5, uv_shape[0] - 0.5, uv_shape[0]) / uv_shape[0],\n th.linspace(0.5, uv_shape[1] - 0.5, uv_shape[1]) / uv_shape[1],\n )\n uv_grid = th.stack(uv_grid[::-1], dim=2).to(uv_tri_uvs)\n\n bary_map = bary_coords(uv_grid.view(-1, 2), uv_tri_uvs.view(3, -1, 2))\n bary_map = bary_map.permute(1, 0).view(uv_shape[0], uv_shape[1], 3)\n bary_map[face_index_map < 0] = 0\n return face_index_map, bary_map" }, { "identifier": "make_uv_vert_index", "path": "ca_body/utils/geom.py", "snippet": "def make_uv_vert_index(\n vt: th.Tensor,\n vi: th.Tensor,\n vti: th.Tensor,\n uv_shape: Union[Tuple[int, int], int],\n flip_uv: bool = True,\n):\n \"\"\"Compute a UV-space vertex index map identifying which mesh vertices\n comprise the triangle containing each texel. 
For texels with no assigned\n triangle, all indices will be -1.\n \"\"\"\n face_index_map = make_uv_face_index(vt, vti, uv_shape, flip_uv).to(vi.device)\n vert_index_map = vi[face_index_map.clamp(min=0)]\n vert_index_map[face_index_map < 0] = -1\n return vert_index_map.long()" } ]
import logging import igl import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F from logging import Logger from typing import Any, Dict, Optional, Tuple, Union from ca_body.utils.geom import ( index_image_impaint, make_uv_barys, make_uv_vert_index, ) from trimesh import Trimesh from trimesh.triangles import points_to_barycentric
3,201
uv_size, dtype=values.dtype, device=values.device, ) values_uv[:, :, index_mask] = values_flat return values_uv def sample_uv( values_uv: th.Tensor, uv_coords: th.Tensor, v2uv: Optional[th.Tensor] = None, mode: str = "bilinear", align_corners: bool = False, flip_uvs: bool = False, ) -> th.Tensor: batch_size = values_uv.shape[0] if flip_uvs: uv_coords = uv_coords.clone() uv_coords[:, 1] = 1.0 - uv_coords[:, 1] uv_coords_norm = (uv_coords * 2.0 - 1.0)[np.newaxis, :, np.newaxis].expand( batch_size, -1, -1, -1 ) values = ( F.grid_sample(values_uv, uv_coords_norm, align_corners=align_corners, mode=mode) .squeeze(-1) .permute((0, 2, 1)) ) if v2uv is not None: values_duplicate = values[:, v2uv] values = values_duplicate.mean(2) # if return_var: # values_var = values_duplicate.var(2) # return values, values_var return values def compute_tbn_uv( tri_xyz: th.Tensor, tri_uv: th.Tensor, eps: float = 1e-5 ) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: """Compute tangents, bitangents, normals. Args: tri_xyz: [B,N,3,3] vertex coordinates tri_uv: [N,2] texture coordinates Returns: tangents, bitangents, normals """ tri_uv = tri_uv[np.newaxis] v01 = tri_xyz[:, :, 1] - tri_xyz[:, :, 0] v02 = tri_xyz[:, :, 2] - tri_xyz[:, :, 0] normals = th.cross(v01, v02, dim=-1) normals = normals / th.norm(normals, dim=-1, keepdim=True).clamp(min=eps) vt01 = tri_uv[:, :, 1] - tri_uv[:, :, 0] vt02 = tri_uv[:, :, 2] - tri_uv[:, :, 0] f = th.tensor([1.0], device=tri_xyz.device) / ( vt01[..., 0] * vt02[..., 1] - vt01[..., 1] * vt02[..., 0] ) tangents = f[..., np.newaxis] * ( v01 * vt02[..., 1][..., np.newaxis] - v02 * vt01[..., 1][..., np.newaxis] ) tangents = tangents / th.norm(tangents, dim=-1, keepdim=True).clamp(min=eps) bitangents = th.cross(normals, tangents, dim=-1) bitangents = bitangents / th.norm(bitangents, dim=-1, keepdim=True).clamp(min=eps).clamp( min=eps ) return tangents, bitangents, normals class GeometryModule(nn.Module): """This module encapsulates uv correspondences and vertex images.""" def __init__( self, vi: th.Tensor, vt: th.Tensor, vti: th.Tensor, v2uv: th.Tensor, uv_size: int, flip_uv: bool = False, impaint: bool = False, impaint_threshold: float = 100.0, device=None, ) -> None: super().__init__() self.register_buffer("vi", th.as_tensor(vi)) self.register_buffer("vt", th.as_tensor(vt)) self.register_buffer("vti", th.as_tensor(vti)) self.register_buffer("v2uv", th.as_tensor(v2uv)) self.uv_size: int = uv_size index_image = make_uv_vert_index( self.vt, self.vi, self.vti, uv_shape=uv_size, flip_uv=flip_uv, ).cpu() face_index, bary_image = make_uv_barys(self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv) if impaint: # TODO: have an option to pre-compute this? assert isinstance(uv_size, int) if uv_size >= 1024: logger.info("impainting index image might take a while for sizes >= 1024")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger: Logger = logging.getLogger(__name__) def face_normals_v2(v: th.Tensor, vi: th.Tensor, eps: float = 1e-5) -> th.Tensor: pts = v[:, vi] v0 = pts[:, :, 1] - pts[:, :, 0] v1 = pts[:, :, 2] - pts[:, :, 0] n = th.cross(v0, v1, dim=-1) norm = th.norm(n, dim=-1, keepdim=True) norm[norm < eps] = 1 n /= norm return n def vert_normals_v2(v: th.Tensor, vi: th.Tensor, eps: float = 1.0e-5) -> th.Tensor: fnorms = face_normals_v2(v, vi) fnorms = fnorms[:, :, None].expand(-1, -1, 3, -1).reshape(fnorms.shape[0], -1, 3) vi_flat = vi.view(1, -1).expand(v.shape[0], -1) vnorms = th.zeros_like(v) for j in range(3): vnorms[..., j].scatter_add_(1, vi_flat, fnorms[..., j]) norm = th.norm(vnorms, dim=-1, keepdim=True) norm[norm < eps] = 1 vnorms /= norm return vnorms def compute_neighbours( n_verts: int, vi: th.Tensor, n_max_values: int = 10 ) -> Tuple[th.Tensor, th.Tensor]: """Computes first-ring neighbours given vertices and faces.""" n_vi = vi.shape[0] adj = {i: set() for i in range(n_verts)} for i in range(n_vi): for idx in vi[i]: adj[idx] |= set(vi[i]) - {idx} nbs_idxs = np.tile(np.arange(n_verts)[:, np.newaxis], (1, n_max_values)) nbs_weights = np.zeros((n_verts, n_max_values), dtype=np.float32) for idx in range(n_verts): n_values = min(len(adj[idx]), n_max_values) nbs_idxs[idx, :n_values] = np.array(list(adj[idx]))[:n_values] nbs_weights[idx, :n_values] = -1.0 / n_values return nbs_idxs, nbs_weights def compute_v2uv(n_verts: int, vi: th.Tensor, vti: th.Tensor, n_max: int = 4) -> th.Tensor: """Computes mapping from vertex indices to texture indices. Args: vi: [F, 3], triangles vti: [F, 3], texture triangles n_max: int, max number of texture locations Returns: [n_verts, n_max], texture indices """ v2uv_dict = {} for i_v, i_uv in zip(vi.reshape(-1), vti.reshape(-1)): v2uv_dict.setdefault(i_v, set()).add(i_uv) assert len(v2uv_dict) == n_verts v2uv = np.zeros((n_verts, n_max), dtype=np.int32) for i in range(n_verts): vals = sorted(v2uv_dict[i]) v2uv[i, :] = vals[0] v2uv[i, : len(vals)] = np.array(vals) return v2uv def values_to_uv(values: th.Tensor, index_img: th.Tensor, bary_img: th.Tensor) -> th.Tensor: uv_size = index_img.shape[0] index_mask = th.all(index_img != -1, dim=-1) idxs_flat = index_img[index_mask].to(th.int64) bary_flat = bary_img[index_mask].to(th.float32) # NOTE: here we assume values_flat = th.sum(values[:, idxs_flat].permute(0, 3, 1, 2) * bary_flat, dim=-1) values_uv = th.zeros( values.shape[0], values.shape[-1], uv_size, uv_size, dtype=values.dtype, device=values.device, ) values_uv[:, :, index_mask] = values_flat return values_uv def sample_uv( values_uv: th.Tensor, uv_coords: th.Tensor, v2uv: Optional[th.Tensor] = None, mode: str = "bilinear", align_corners: bool = False, flip_uvs: bool = False, ) -> th.Tensor: batch_size = values_uv.shape[0] if flip_uvs: uv_coords = uv_coords.clone() uv_coords[:, 1] = 1.0 - uv_coords[:, 1] uv_coords_norm = (uv_coords * 2.0 - 1.0)[np.newaxis, :, np.newaxis].expand( batch_size, -1, -1, -1 ) values = ( F.grid_sample(values_uv, uv_coords_norm, align_corners=align_corners, mode=mode) .squeeze(-1) .permute((0, 2, 1)) ) if v2uv is not None: values_duplicate = values[:, v2uv] values = values_duplicate.mean(2) # if return_var: # values_var = values_duplicate.var(2) # return values, values_var return values def compute_tbn_uv( tri_xyz: th.Tensor, tri_uv: th.Tensor, eps: float = 1e-5 ) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: """Compute tangents, bitangents, normals. Args: tri_xyz: [B,N,3,3] vertex coordinates tri_uv: [N,2] texture coordinates Returns: tangents, bitangents, normals """ tri_uv = tri_uv[np.newaxis] v01 = tri_xyz[:, :, 1] - tri_xyz[:, :, 0] v02 = tri_xyz[:, :, 2] - tri_xyz[:, :, 0] normals = th.cross(v01, v02, dim=-1) normals = normals / th.norm(normals, dim=-1, keepdim=True).clamp(min=eps) vt01 = tri_uv[:, :, 1] - tri_uv[:, :, 0] vt02 = tri_uv[:, :, 2] - tri_uv[:, :, 0] f = th.tensor([1.0], device=tri_xyz.device) / ( vt01[..., 0] * vt02[..., 1] - vt01[..., 1] * vt02[..., 0] ) tangents = f[..., np.newaxis] * ( v01 * vt02[..., 1][..., np.newaxis] - v02 * vt01[..., 1][..., np.newaxis] ) tangents = tangents / th.norm(tangents, dim=-1, keepdim=True).clamp(min=eps) bitangents = th.cross(normals, tangents, dim=-1) bitangents = bitangents / th.norm(bitangents, dim=-1, keepdim=True).clamp(min=eps).clamp( min=eps ) return tangents, bitangents, normals class GeometryModule(nn.Module): """This module encapsulates uv correspondences and vertex images.""" def __init__( self, vi: th.Tensor, vt: th.Tensor, vti: th.Tensor, v2uv: th.Tensor, uv_size: int, flip_uv: bool = False, impaint: bool = False, impaint_threshold: float = 100.0, device=None, ) -> None: super().__init__() self.register_buffer("vi", th.as_tensor(vi)) self.register_buffer("vt", th.as_tensor(vt)) self.register_buffer("vti", th.as_tensor(vti)) self.register_buffer("v2uv", th.as_tensor(v2uv)) self.uv_size: int = uv_size index_image = make_uv_vert_index( self.vt, self.vi, self.vti, uv_shape=uv_size, flip_uv=flip_uv, ).cpu() face_index, bary_image = make_uv_barys(self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv) if impaint: # TODO: have an option to pre-compute this? assert isinstance(uv_size, int) if uv_size >= 1024: logger.info("impainting index image might take a while for sizes >= 1024")
index_image, bary_image = index_image_impaint(
0
2023-12-27 15:31:35+00:00
4k
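The sample_uv helper in the row above reduces to torch.nn.functional.grid_sample once the UV coordinates are rescaled from [0, 1] to the [-1, 1] range that grid_sample expects. A minimal sketch of that rescaling and the resulting shapes (the tensor sizes are illustrative assumptions, not values from the repo):

import torch as th
import torch.nn.functional as F

def sample_uv_sketch(values_uv, uv_coords):
    # values_uv: [B, C, H, W] UV-space texture; uv_coords: [V, 2] in [0, 1].
    batch_size = values_uv.shape[0]
    # grid_sample expects coordinates in [-1, 1] shaped [B, H_out, W_out, 2];
    # here each vertex becomes one output "pixel" of a [B, V, 1] grid.
    grid = (uv_coords * 2.0 - 1.0)[None, :, None, :].expand(batch_size, -1, -1, -1)
    out = F.grid_sample(values_uv, grid, mode="bilinear", align_corners=False)
    return out.squeeze(-1).permute(0, 2, 1)  # [B, V, C], one sample per vertex

values_uv = th.rand(2, 3, 16, 16)
uv_coords = th.rand(100, 2)
print(sample_uv_sketch(values_uv, uv_coords).shape)  # torch.Size([2, 100, 3])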
0x00wolf/hkrsAI
src/conversation.py
[ { "identifier": "SystemPrompt", "path": "src/systemprompt.py", "snippet": "class SystemPrompt:\n \"\"\"A class that manages setting the system prompt used to define AI assistants. \\\n To add a new system prompt that will be selectable from the runtime menu, \\\n copy the prompt to an extensionless file in the appropriate category folder.\"\"\"\n def __init__(self, prompts_dir, path=''):\n self.dir = prompts_dir\n self.path = path\n self.content = ''\n self.title = 'custom'\n self._start()\n\n def _start(self):\n \"\"\"Allow the user to define a custom prompt, or select one of the pre-made options\"\"\"\n if not self.path:\n self.content = input(\"\\n[*] input a custom system prompt, \\\n \\n[*] hit enter to view preexisting options:\\n>\")\n if not self.content:\n self._set()\n else:\n self.content = self._fetch_contents(self.path)\n self.title = self.path.rpartition('/')[-1]\n\n def _set(self):\n \"\"\"Loop that runs until a prompt has been selected\"\"\"\n while True:\n category = self._select_category()\n title = self._select_prompt(category)\n if title == 'back':\n pass\n else:\n self.path = f'{self.dir}/{category}/{title}'\n prompt = self._fetch_contents(self.path)\n print(f'\\n{prompt}\\n')\n set_prompt = input(\"[*] select prompt\\n\\n[-] 'enter' to accept\\n[-] 'n' to go back\\n\"\n \"[-] 'x' to enter a custom font'\\n>\")\n if set_prompt == 'x':\n return SystemPrompt(prompts_dir=self.dir)\n elif set_prompt == 'n':\n pass\n else:\n self.title = self.path.rpartition('/')[-1]\n self.content = prompt\n print(f'[*] system prompt: {self.title}\\n[*] query AI:')\n return\n\n def _select_category(self):\n \"\"\"Select a system prompt category from the pre-made options\"\"\"\n print('\\n[-] categories\\n')\n categories = self._fetch_from(self.dir)\n categories.sort()\n choice = self._make_choice(categories)\n print(f'\\n[*] category: {choice}')\n return choice\n\n def _select_prompt(self, category):\n \"\"\"Select a pre-made system prompt from a particular category\"\"\"\n print('[-] prompts\\n')\n category = f'{self.dir}/{category}'\n system_prompts = self._fetch_from(category)\n system_prompts.sort()\n self.path = self._make_choice(system_prompts, go_back=True)\n return self.path\n\n def _make_choice(self, options_list, go_back=False):\n \"\"\"Provides the user with the ability to select a prompt from an enumerated options list\"\"\"\n # Select from a list of options by the objects enumerated position\n while True:\n try:\n self._enumerate_list(options_list, go_back)\n selection = input('\\n[*] select by position:\\n>')\n selection = int(selection)\n if 1 <= selection <= len(options_list):\n return options_list[selection - 1]\n elif go_back and selection == len(options_list) + 1:\n return 'back'\n except ValueError:\n print('[*] invalid selection')\n\n @staticmethod\n def _enumerate_list(options_list, go_back=False):\n \"\"\"\"Enumerates a list of options\"\"\"\n for x, _item in enumerate(options_list, 1):\n print(f'{x}. {_item}')\n if go_back:\n print(f'{x + 1}. 
back')\n\n @staticmethod\n def _fetch_contents(file_path):\n \"\"\"Fetches the contents of a file\"\"\"\n try:\n with open(file_path, 'r') as f:\n return f.read()\n except FileNotFoundError:\n pass\n\n @staticmethod\n def _fetch_from(root_dir):\n \"\"\"Returns a list containing the contents of a directory\"\"\"\n directories = os.listdir(root_dir)\n return directories" }, { "identifier": "GPT", "path": "src/gpt.py", "snippet": "class GPT:\n def __init__(self, client, model, temperature, top_p, n, frequency_penalty, presence_penalty, max_tokens):\n self.client = client\n self.model = model\n self.temperature = temperature\n self.top_p = top_p\n self.n = n\n self.frequency_penalty = frequency_penalty\n self.presence_penalty = presence_penalty\n self.max_tokens = max_tokens\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, new_value: str):\n new_value = str(new_value)\n if new_value == 'gpt-3.5-turbo' or new_value == 'gpt-4':\n self._model = new_value\n else:\n raise ValueError(f'\\n{BAD_MODEL.format(new_value)}')\n\n @property\n def temperature(self):\n return self._temperature\n\n @temperature.setter\n def temperature(self, new_value: float):\n new_value = float(new_value)\n if not (0.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_TEMP.format(new_value)}')\n else:\n self._temperature = new_value\n\n @property\n def top_p(self):\n return self._top_p\n\n @top_p.setter\n def top_p(self, new_value: float):\n new_value = float(new_value)\n if not (0 <= new_value <= 1.0):\n raise ValueError(f'\\n{BAD_TP.format(new_value)}')\n else:\n self._top_p = new_value\n\n @property\n def frequency_penalty(self):\n return self._frequency_penalty\n\n @frequency_penalty.setter\n def frequency_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_FP.format(new_value)}')\n else:\n self._frequency_penalty = new_value\n\n @property\n def presence_penalty(self):\n return self._presence_penalty\n\n @presence_penalty.setter\n def presence_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_PP.format(new_value)}')\n else:\n self._presence_penalty = new_value\n\n @property\n def n(self):\n return self._n\n\n @n.setter\n def n(self, new_value):\n new_value = int(new_value)\n if not (1 <= new_value <= 20):\n raise ValueError(f'\\n{BAD_N.format(new_value)}')\n else:\n self._n = new_value\n\n @property\n def max_tokens(self):\n return self._max_tokens\n\n @max_tokens.setter\n def max_tokens(self, new_value: int):\n new_value = int(new_value)\n if not (1 <= new_value <= 4096):\n raise ValueError(f'\\n{BAD_MT.format(new_value)}')\n else:\n self._max_tokens = new_value" } ]
import dataclasses import openai from typing import List from src.systemprompt import SystemPrompt from src.gpt import GPT
1,946
@dataclasses.dataclass class Conversation: messages: list[dict] = dataclasses.field(default_factory=list) query: str = '' reply: str = '' response: dict = dataclasses.field(default_factory=dict) tokens: int = 0 def start(self, system_prompt: str): self.messages = [{"role": "system", "content": system_prompt}] print() return Conversation(messages=self.messages) def speak(self, content: str): self.messages.append({"role": "user", "content": content}) return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response) def think(self, thought): if self.query == '': self.query = thought else: self.query = f'{self.query}\n{thought}' return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
@dataclasses.dataclass class Conversation: messages: list[dict] = dataclasses.field(default_factory=list) query: str = '' reply: str = '' response: dict = dataclasses.field(default_factory=dict) tokens: int = 0 def start(self, system_prompt: str): self.messages = [{"role": "system", "content": system_prompt}] print() return Conversation(messages=self.messages) def speak(self, content: str): self.messages.append({"role": "user", "content": content}) return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response) def think(self, thought): if self.query == '': self.query = thought else: self.query = f'{self.query}\n{thought}' return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
def listen(self, gpt: GPT):
1
2023-12-22 07:04:47+00:00
4k
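The gold next_line in the row above opens Conversation.listen. A plausible body, sketched purely for illustration under the assumption that it forwards the accumulated messages and the GPT parameter holder to OpenAI's chat completions endpoint (openai>=1.x client API); this is not the repo's actual implementation:

def listen_sketch(conversation, gpt):
    # Hypothetical listen(): send the running message list along with the
    # sampling parameters validated by the GPT class's property setters.
    response = gpt.client.chat.completions.create(
        model=gpt.model,
        messages=conversation.messages,
        temperature=gpt.temperature,
        top_p=gpt.top_p,
        n=gpt.n,
        frequency_penalty=gpt.frequency_penalty,
        presence_penalty=gpt.presence_penalty,
        max_tokens=gpt.max_tokens,
    )
    reply = response.choices[0].message.content
    conversation.messages.append({"role": "assistant", "content": reply})
    return reply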
ccurme/chesster
chesster/app/app.py
[ { "identifier": "BoardManager", "path": "chesster/app/board_manager.py", "snippet": "class BoardManager:\n def __init__(self):\n self.active_websockets: list[WebSocket] = []\n self.last_updated_image = None\n self.board = chess.Board()\n self.player_side = chess.WHITE\n self.interesting_move_iterator = None\n self.chat_history = []\n self.remote_runnable = RemoteRunnable(\n f\"http://{LANGSERVE_HOST}:8001/chesster\", headers={\"x-token\": LANGSERVE_SECRET}\n )\n\n async def set_board(self, board: chess.Board) -> None:\n \"\"\"Set board.\"\"\"\n self.board = board\n await self.update_board(self.board)\n\n async def set_player_side(self, player_side: chess.Color) -> None:\n \"\"\"Set player side.\"\"\"\n self.player_side = player_side\n await self.update_board(self.board)\n\n async def set_interesting_move_iterator(self) -> None:\n \"\"\"Calculate interesting moves in board's move stack.\"\"\"\n self.interesting_move_iterator = self._interesting_move_iterator()\n\n async def make_move(self, move: chess.Move) -> None:\n \"\"\"Parse move and update board.\"\"\"\n self.board.push(move)\n await self.update_board(self.board)\n\n async def _interesting_move_iterator(\n self, centipawn_threshold: int = 100\n ) -> Iterator[chess.Board]:\n \"\"\"Make iterator over interesting moves according to Chess engine.\"\"\"\n new_board = chess.Board()\n centipawns = 0\n for move in self.board.move_stack:\n new_board.push(move)\n new_centipawns = get_engine_score(new_board, self.player_side)\n if new_centipawns is None:\n continue\n delta = new_centipawns - centipawns\n if new_board.turn != self.player_side: # player just moved\n if abs(delta) > centipawn_threshold:\n await self.update_board(new_board)\n yield {\n \"board\": serialize_board_state_with_last_move(\n new_board, self.player_side\n ),\n \"last_move_centipawns\": delta,\n }\n centipawns = new_centipawns\n\n async def update_board(self, board: chess.Board) -> None:\n \"\"\"Update SVG string.\"\"\"\n board_svg = urllib.parse.quote(str(display_board(board, self.player_side)))\n svg_string = f\"data:image/svg+xml,{board_svg}\"\n self.last_updated_image = svg_string\n for websocket in self.active_websockets:\n await websocket.send_text(self.last_updated_image)\n\n async def websocket_endpoint(self, websocket: WebSocket):\n await websocket.accept()\n self.active_websockets.append(websocket)\n try:\n welcome_message = \"Welcome to Chesster!\"\n await websocket.send_text(welcome_message)\n while True:\n data = await websocket.receive_text()\n if data == \"Show me the image\":\n if self.last_updated_image is not None:\n await websocket.send_text(self.last_updated_image)\n else:\n user_message = data\n await websocket.send_text(user_message)\n response_message = await self.remote_runnable.ainvoke(\n {\n \"user_message\": user_message,\n \"chat_history\": self.chat_history,\n }\n )\n self.chat_history.append((user_message, response_message))\n self.chat_history = self.chat_history[-CHAT_HISTORY_LENGTH:]\n await websocket.send_text(response_message)\n except WebSocketDisconnect:\n self.active_websockets.remove(websocket)" }, { "identifier": "get_engine_move", "path": "chesster/app/utils.py", "snippet": "def get_engine_move(board: chess.Board) -> chess.Move:\n \"\"\"Get move from engine.\"\"\"\n engine = get_stockfish_engine()\n engine_result = engine.play(board, chess.engine.Limit(time=0.1))\n engine.quit()\n return engine_result.move" }, { "identifier": "parse_chess_move", "path": "chesster/app/utils.py", "snippet": "def parse_chess_move(board: chess.Board, 
move_uci: str) -> chess.Move:\n \"\"\"Parse chess move from UCI format.\"\"\"\n try:\n return chess.Move.from_uci(move_uci)\n except chess.InvalidMoveError:\n return board.parse_san(move_uci) # LLM sometimes outputs SAN" }, { "identifier": "parse_pgn_into_move_list", "path": "chesster/app/utils.py", "snippet": "def parse_pgn_into_move_list(game_pgn: str) -> Iterable[chess.Move]:\n \"\"\"Parse PGN into list of Move objects.\"\"\"\n pgn_fp = io.StringIO(game_pgn)\n game = chess.pgn.read_game(pgn_fp)\n return game.mainline_moves()" }, { "identifier": "serialize_board_state", "path": "chesster/app/utils.py", "snippet": "def serialize_board_state(board: chess.Board, player_side: chess.Color) -> str:\n \"\"\"Serialize board state.\"\"\"\n if player_side == chess.BLACK:\n board_picture = str(board.mirror())\n else:\n board_picture = str(board)\n return f\"{board_picture}\\n\\n{chess.Board().variation_san(board.move_stack)}\"" } ]
import time import chess import chess.svg from typing import Any, AsyncIterator from fastapi import FastAPI, Request, WebSocket from fastapi.responses import HTMLResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates from chesster.app.board_manager import BoardManager from chesster.app.utils import ( get_engine_move, parse_chess_move, parse_pgn_into_move_list, serialize_board_state, )
1,743
app = FastAPI() app.mount("/static", StaticFiles(directory="chesster/app/static"), name="static") templates = Jinja2Templates(directory="chesster/app/templates") board_manager = BoardManager() @app.get("/", response_class=HTMLResponse) async def root(request: Request): return templates.TemplateResponse(request, "index.html") @app.post("/set_player_side/{color}") async def set_player_side(color: str) -> dict: """Set side to black or white.""" if "w" in color: player_side = chess.WHITE side_str = "white" else: player_side = chess.BLACK side_str = "black" await board_manager.set_player_side(player_side) return {"message": f"Updated player side successfully to {side_str}."} @app.post("/initialize_game_vs_opponent/{player_side_str}") async def initialize_game_vs_opponent(player_side_str: str) -> dict: """Start new game.""" await board_manager.set_board(chess.Board()) _ = await set_player_side(player_side_str) if board_manager.player_side == chess.BLACK: opponent_move = get_engine_move(board_manager.board) opponent_move_san = board_manager.board.san(opponent_move) await board_manager.make_move(opponent_move) response = f"Game initialized. Opponent move: {opponent_move_san}." else: response = "Game initialized. Your move." return {"message": response} @app.post("/make_move_vs_opponent/{move_str}") async def make_move_vs_opponent(move_str: str) -> dict: """Push move to board against engine. Move should be a valid UCI string.""" if board_manager.board.is_game_over(): return {"message": "Game over."} move = parse_chess_move(board_manager.board, move_str) if not board_manager.board.is_legal(move): return {"message": "Illegal move, try again."} move_san = board_manager.board.san(move) await board_manager.make_move(move) opponent_move = get_engine_move(board_manager.board) opponent_move_san = board_manager.board.san(opponent_move) time.sleep(1) await board_manager.make_move(opponent_move) response = ( f"Successfully made move to {move_san}. Opponent responded by moving" f" to {opponent_move_san}.\n\n"
app = FastAPI() app.mount("/static", StaticFiles(directory="chesster/app/static"), name="static") templates = Jinja2Templates(directory="chesster/app/templates") board_manager = BoardManager() @app.get("/", response_class=HTMLResponse) async def root(request: Request): return templates.TemplateResponse(request, "index.html") @app.post("/set_player_side/{color}") async def set_player_side(color: str) -> dict: """Set side to black or white.""" if "w" in color: player_side = chess.WHITE side_str = "white" else: player_side = chess.BLACK side_str = "black" await board_manager.set_player_side(player_side) return {"message": f"Updated player side successfully to {side_str}."} @app.post("/initialize_game_vs_opponent/{player_side_str}") async def initialize_game_vs_opponent(player_side_str: str) -> dict: """Start new game.""" await board_manager.set_board(chess.Board()) _ = await set_player_side(player_side_str) if board_manager.player_side == chess.BLACK: opponent_move = get_engine_move(board_manager.board) opponent_move_san = board_manager.board.san(opponent_move) await board_manager.make_move(opponent_move) response = f"Game initialized. Opponent move: {opponent_move_san}." else: response = "Game initialized. Your move." return {"message": response} @app.post("/make_move_vs_opponent/{move_str}") async def make_move_vs_opponent(move_str: str) -> dict: """Push move to board against engine. Move should be a valid UCI string.""" if board_manager.board.is_game_over(): return {"message": "Game over."} move = parse_chess_move(board_manager.board, move_str) if not board_manager.board.is_legal(move): return {"message": "Illegal move, try again."} move_san = board_manager.board.san(move) await board_manager.make_move(move) opponent_move = get_engine_move(board_manager.board) opponent_move_san = board_manager.board.san(opponent_move) time.sleep(1) await board_manager.make_move(opponent_move) response = ( f"Successfully made move to {move_san}. Opponent responded by moving" f" to {opponent_move_san}.\n\n"
f"Board state:\n{serialize_board_state(board_manager.board, board_manager.player_side)}"
4
2023-12-24 19:19:31+00:00
4k
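The parse_chess_move snippet in the row above tries strict UCI parsing first and falls back to SAN because LLM output mixes both notations. A quick demonstration of the two python-chess parsing paths it relies on:

import chess

board = chess.Board()
move = chess.Move.from_uci("e2e4")  # strict UCI parsing path
assert board.is_legal(move)
board.push(move)
print(board.parse_san("e5"))  # SAN fallback path; resolves to Move e7e5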